diff --git a/.gitignore b/.gitignore index 9e087dab479..f9fc9f70931 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,4 @@ hbase-*/test *.iws *.iml *.ipr +patchprocess/ diff --git a/bin/hbase b/bin/hbase index 87138377dce..6430dc53fc6 100755 --- a/bin/hbase +++ b/bin/hbase @@ -34,8 +34,9 @@ # HBASE_CLASSPATH_PREFIX Extra Java CLASSPATH entries that should be # prefixed to the system classpath. # -# HBASE_HEAPSIZE The maximum amount of heap to use, in MB. -# Default is 1000. +# HBASE_HEAPSIZE The maximum amount of heap to use. +# Default is unset and uses the JVMs default setting +# (usually 1/4th of the available memory). # # HBASE_LIBRARY_PATH HBase additions to JAVA_LIBRARY_PATH for adding # native libraries. @@ -214,14 +215,17 @@ if [ "$HBASE_LIBRARY_PATH" != "" ]; then fi #If avail, add Hadoop to the CLASSPATH and to the JAVA_LIBRARY_PATH -HADOOP_IN_PATH=$(PATH="${HADOOP_HOME:-${HADOOP_PREFIX}}/bin:$PATH" which hadoop 2>/dev/null) -if [ -f ${HADOOP_IN_PATH} ]; then - HADOOP_JAVA_LIBRARY_PATH=$(HADOOP_CLASSPATH="$CLASSPATH" ${HADOOP_IN_PATH} \ - org.apache.hadoop.hbase.util.GetJavaProperty java.library.path 2>/dev/null) - if [ -n "$HADOOP_JAVA_LIBRARY_PATH" ]; then - JAVA_LIBRARY_PATH=$(append_path "${JAVA_LIBRARY_PATH}" "$HADOOP_JAVA_LIBRARY_PATH") +# Allow this functionality to be disabled +if [ "$HBASE_DISABLE_HADOOP_CLASSPATH_LOOKUP" != "true" ] ; then + HADOOP_IN_PATH=$(PATH="${HADOOP_HOME:-${HADOOP_PREFIX}}/bin:$PATH" which hadoop 2>/dev/null) + if [ -f ${HADOOP_IN_PATH} ]; then + HADOOP_JAVA_LIBRARY_PATH=$(HADOOP_CLASSPATH="$CLASSPATH" ${HADOOP_IN_PATH} \ + org.apache.hadoop.hbase.util.GetJavaProperty java.library.path 2>/dev/null) + if [ -n "$HADOOP_JAVA_LIBRARY_PATH" ]; then + JAVA_LIBRARY_PATH=$(append_path "${JAVA_LIBRARY_PATH}" "$HADOOP_JAVA_LIBRARY_PATH") + fi + CLASSPATH=$(append_path "${CLASSPATH}" `${HADOOP_IN_PATH} classpath 2>/dev/null`) fi - CLASSPATH=$(append_path "${CLASSPATH}" `${HADOOP_IN_PATH} classpath 2>/dev/null`) fi # Add user-specified CLASSPATH last diff --git a/bin/hbase-cleanup.sh b/bin/hbase-cleanup.sh index 40633fc6b14..596e366a308 100755 --- a/bin/hbase-cleanup.sh +++ b/bin/hbase-cleanup.sh @@ -31,7 +31,7 @@ # HBASE_SSH_OPTS Options passed to ssh when running remote commands. # -usage="Usage: hbase-cleanup.sh (--cleanZk|--cleanHdfs|--cleanAll)" +usage="Usage: hbase-cleanup.sh (--cleanZk|--cleanHdfs|--cleanAll|--cleanAcls)" bin=`dirname "$0"` bin=`cd "$bin">/dev/null; pwd` @@ -40,7 +40,7 @@ bin=`cd "$bin">/dev/null; pwd` . "$bin"/hbase-config.sh case $1 in - --cleanZk|--cleanHdfs|--cleanAll) + --cleanZk|--cleanHdfs|--cleanAll|--cleanAcls) matches="yes" ;; *) ;; esac @@ -90,6 +90,11 @@ execute_hdfs_command() { "$bin"/hbase org.apache.hadoop.fs.FsShell $command 2>&1 } +execute_clean_acls() { + command=$1; + "$bin"/hbase org.apache.hadoop.hbase.zookeeper.ZkAclReset $command 2>&1 +} + clean_up() { case $1 in --cleanZk) @@ -102,6 +107,9 @@ clean_up() { execute_zk_command "rmr ${zparent}"; execute_hdfs_command "-rmr ${hrootdir}" ;; + --cleanAcls) + execute_clean_acls; + ;; *) ;; esac diff --git a/bin/hbase-config.sh b/bin/hbase-config.sh index 2bca169a57c..12e4f40425a 100644 --- a/bin/hbase-config.sh +++ b/bin/hbase-config.sh @@ -126,14 +126,12 @@ export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4} if [ -z "$JAVA_HOME" ]; then cat 1>&2 < http://java.sun.com/javase/downloads/ < | +| > http://www.oracle.com/technetwork/java/javase/downloads | | | | HBase requires Java 1.7 or later. 
| -| NOTE: This script will find Sun Java whether you install using the | -| binary or the RPM based installer. | +======================================================================+ EOF exit 1 diff --git a/bin/hbase.cmd b/bin/hbase.cmd index db7d856e4f7..91c2aabd4a3 100644 --- a/bin/hbase.cmd +++ b/bin/hbase.cmd @@ -28,8 +28,9 @@ @rem @rem HBASE_CLASSPATH Extra Java CLASSPATH entries. @rem -@rem HBASE_HEAPSIZE The maximum amount of heap to use, in MB. -@rem Default is 1000. +@rem HBASE_HEAPSIZE The maximum amount of heap to use. +@rem Default is unset and uses the JVMs default setting +@rem (usually 1/4th of the available memory). @rem @rem HBASE_OPTS Extra Java runtime options. @rem @@ -87,7 +88,7 @@ if "%hbase-command%"=="" ( goto :eof ) -set JAVA_HEAP_MAX=-Xmx1000m +set JAVA_HEAP_MAX="" set JAVA_OFFHEAP_MAX="" rem check envvars which might override default args diff --git a/bin/local-master-backup.sh b/bin/local-master-backup.sh index 36a70fa965f..e1a80cd3a7e 100755 --- a/bin/local-master-backup.sh +++ b/bin/local-master-backup.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/usr/bin/env bash #/** # * Copyright 2007 The Apache Software Foundation # * @@ -54,5 +54,9 @@ shift; for i in $* do - run_master $cmd $i + if [[ "$i" =~ ^[0-9]+$ ]]; then + run_master $cmd $i + else + echo "Invalid argument" + fi done diff --git a/bin/local-regionservers.sh b/bin/local-regionservers.sh index e2cea6d12ad..f744ee1727c 100755 --- a/bin/local-regionservers.sh +++ b/bin/local-regionservers.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/usr/bin/env bash #/** # * Copyright 2007 The Apache Software Foundation # * @@ -52,5 +52,9 @@ shift; for i in $* do - run_regionserver $cmd $i + if [[ "$i" =~ ^[0-9]+$ ]]; then + run_regionserver $cmd $i + else + echo "Invalid argument" + fi done diff --git a/bin/region_mover.rb b/bin/region_mover.rb index 565b0d5a1a6..3259564f922 100644 --- a/bin/region_mover.rb +++ b/bin/region_mover.rb @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin import org.apache.hadoop.hbase.client.Get import org.apache.hadoop.hbase.client.Scan import org.apache.hadoop.hbase.client.HTable -import org.apache.hadoop.hbase.client.HConnectionManager +import org.apache.hadoop.hbase.client.ConnectionFactory import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; import org.apache.hadoop.hbase.filter.InclusiveStopFilter; import org.apache.hadoop.hbase.filter.FilterList; @@ -100,7 +100,7 @@ def isSuccessfulScan(admin, r) scan = Scan.new(r.getStartKey(), r.getStartKey()) scan.setBatch(1) scan.setCaching(1) - scan.setFilter(FilterList.new(FirstKeyOnlyFilter.new(),InclusiveStopFilter().new(r.getStartKey()))) + scan.setFilter(FilterList.new(FirstKeyOnlyFilter.new(),InclusiveStopFilter.new(r.getStartKey()))) begin table = HTable.new(admin.getConfiguration(), r.getTableName()) scanner = table.getScanner(scan) @@ -243,7 +243,7 @@ end # Now get list of regions on targetServer def getRegions(config, servername) - connection = HConnectionManager::getConnection(config); + connection = ConnectionFactory::createConnection(config); return ProtobufUtil::getOnlineRegions(connection.getAdmin(ServerName.valueOf(servername))); end diff --git a/bin/region_status.rb b/bin/region_status.rb index 878d58ab5df..52af49eb62d 100644 --- a/bin/region_status.rb +++ b/bin/region_status.rb @@ -54,9 +54,9 @@ import org.apache.hadoop.hbase.client.Scan import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter import org.apache.hadoop.hbase.util.Bytes import org.apache.hadoop.hbase.HRegionInfo -import org.apache.hadoop.hbase.client.MetaScanner 
+import org.apache.hadoop.hbase.MetaTableAccessor import org.apache.hadoop.hbase.HTableDescriptor -import org.apache.hadoop.hbase.client.HConnectionManager +import org.apache.hadoop.hbase.client.ConnectionFactory # disable debug logging on this script for clarity log_level = org.apache.log4j.Level::ERROR @@ -138,8 +138,8 @@ while true if $tablename.nil? server_count = admin.getClusterStatus().getRegionsCount() else - connection = HConnectionManager::getConnection(config); - server_count = MetaScanner::allTableRegions(config, connection, $TableName ,false).size() + connection = ConnectionFactory::createConnection(config); + server_count = MetaTableAccessor::allTableRegions(connection, $TableName).size() end print "Region Status: #{server_count} / #{meta_count}\n" if SHOULD_WAIT and server_count < meta_count diff --git a/conf/hbase-env.cmd b/conf/hbase-env.cmd index bf7f25aa97d..003d1b388bd 100644 --- a/conf/hbase-env.cmd +++ b/conf/hbase-env.cmd @@ -24,10 +24,11 @@ @rem Extra Java CLASSPATH elements. Optional. @rem set HBASE_CLASSPATH= -@rem The maximum amount of heap to use, in MB. Default is 1000. +@rem The maximum amount of heap to use. Default is left to JVM default. @rem set HBASE_HEAPSIZE=1000 -@rem Uncomment below if you intend to use off heap cache. +@rem Uncomment below if you intend to use off heap cache. For example, to allocate 8G of +@rem offheap, set the value to "8G". @rem set HBASE_OFFHEAPSIZE=1000 @rem For example, to allocate 8G of offheap, to 8G: diff --git a/conf/hbase-env.sh b/conf/hbase-env.sh index 1693b178460..2bbde1a6f97 100644 --- a/conf/hbase-env.sh +++ b/conf/hbase-env.sh @@ -31,14 +31,12 @@ # Extra Java CLASSPATH elements. Optional. # export HBASE_CLASSPATH= -# The maximum amount of heap to use, in MB. Default is 1000. -# export HBASE_HEAPSIZE=1000 +# The maximum amount of heap to use. Default is left to JVM default. +# export HBASE_HEAPSIZE=1G -# Uncomment below if you intend to use off heap cache. -# export HBASE_OFFHEAPSIZE=1000 - -# For example, to allocate 8G of offheap, to 8G: -# export HBASE_OFFHEAPSIZE=8G +# Uncomment below if you intend to use off heap cache. For example, to allocate 8G of +# offheap, set the value to "8G". +# export HBASE_OFFHEAPSIZE=1G # Extra Java runtime options. # Below are what we set by default. May only work with SUN JVM. diff --git a/conf/log4j.properties b/conf/log4j.properties index 472fc03afd9..cd417d7cb01 100644 --- a/conf/log4j.properties +++ b/conf/log4j.properties @@ -76,13 +76,14 @@ log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m% log4j.logger.org.apache.zookeeper=INFO #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG log4j.logger.org.apache.hadoop.hbase=INFO +log4j.logger.org.apache.hadoop.hbase.META=INFO # Make these two classes INFO-level. Make them DEBUG to see more zk debug. log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO #log4j.logger.org.apache.hadoop.dfs=DEBUG # Set this class to log INFO only otherwise its OTT # Enable this to get detailed connection error/retry logging. 
-# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE +# log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=TRACE # Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output) @@ -90,5 +91,4 @@ log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO # Uncomment the below if you want to remove logging of client region caching' # and scan of hbase:meta messages -# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO -# log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO +# log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=INFO diff --git a/dev-support/check_compatibility.sh b/dev-support/check_compatibility.sh index 1b6089b8e8a..5410854cea6 100755 --- a/dev-support/check_compatibility.sh +++ b/dev-support/check_compatibility.sh @@ -65,6 +65,7 @@ a branch (e.g. 0.98), or a particular commit hash. If ref2 is omitted, master will be used. Options: + -a, --all Do not filter by interface annotations. -b, --binary-only Only run the check for binary compatibility. -f, --force-download Download dependencies (i.e. Java ACC), even if they are already present. @@ -88,8 +89,8 @@ __EOF GETOPT=${GETOPT:-/usr/bin/env getopt} # Parse command line arguments and check for proper syntax. -if ! ARG_LIST=$(${GETOPT} -q -o bfhno:qr:s \ - -l binary-only,force-download,help,no-checkout,options:,quick,repo:,source-only \ +if ! ARG_LIST=$(${GETOPT} -q -o abfhno:qr:s \ + -l all,binary-only,force-download,help,no-checkout,options:,quick,repo:,source-only \ -- "${@}"); then usage >&2 exit 1 @@ -98,6 +99,9 @@ eval set -- "${ARG_LIST[@]}" while ((${#})); do case "${1}" in + -a | --all ) + ALL=true + shift 1 ;; -b | --binary-only ) JAVA_ACC_COMMAND+=(-binary) shift 1 ;; @@ -244,10 +248,12 @@ fi # Generate annotation list dynamically; this way, there's no chance the file # gets stale and you have better visiblity into what classes are actually analyzed. +declare -a ANNOTATION_LIST ANNOTATION_LIST+=(InterfaceAudience.Public) +ANNOTATION_LIST+=(InterfaceAudience.LimitedPrivate) if ! [ -f ${SCRIPT_DIRECTORY}/target/compatibility/annotations ]; then cat > ${SCRIPT_DIRECTORY}/target/compatibility/annotations << __EOF -$(tr " " "\n" <<< "${ANNOTATION_LIST}") +$(tr " " "\n" <<< "${ANNOTATION_LIST[@]}") __EOF fi @@ -257,7 +263,9 @@ JAVA_ACC_COMMAND+=(-v1 ${COMMIT[1]} -v2 ${COMMIT[2]}) JAVA_ACC_COMMAND+=(-d1 ${JARS[1]} -d2 ${JARS[2]}) JAVA_ACC_COMMAND+=(-report-path \ ${SCRIPT_DIRECTORY}/target/compatibility/report/${COMMIT[1]}_${COMMIT[2]}_compat_report.html) -JAVA_ACC_COMMAND+=(-annotations-list ${SCRIPT_DIRECTORY}/target/compatibility/annotations) +if [ "${ALL}" != "true" ] ; then + JAVA_ACC_COMMAND+=(-annotations-list ${SCRIPT_DIRECTORY}/target/compatibility/annotations) +fi # Delete any existing report folder under /dev-support/target/compatibility. 
rm -rf ${SCRIPT_DIRECTORY}/target/compatibility/report diff --git a/dev-support/checkstyle_report.py b/dev-support/checkstyle_report.py old mode 100644 new mode 100755 diff --git a/dev-support/findHangingTests.py b/dev-support/findHangingTests.py old mode 100644 new mode 100755 diff --git a/dev-support/jenkinsEnv.sh b/dev-support/jenkinsEnv.sh index 499245c2e40..d2e4f6551d4 100755 --- a/dev-support/jenkinsEnv.sh +++ b/dev-support/jenkinsEnv.sh @@ -25,7 +25,7 @@ export CLOVER_HOME=/home/jenkins/tools/clover/latest export MAVEN_HOME=/home/jenkins/tools/maven/latest export PATH=$PATH:$JAVA_HOME/bin:$ANT_HOME/bin: -export MAVEN_OPTS="-Xmx3100M -XX:-UsePerfData" +export MAVEN_OPTS="${MAVEN_OPTS:-"-Xmx3100M -XX:-UsePerfData -XX:MaxPermSize=256m"}" ulimit -n diff --git a/dev-support/publish_hbase_website.sh b/dev-support/publish_hbase_website.sh index 0350a6d5fab..072de8dcf04 100755 --- a/dev-support/publish_hbase_website.sh +++ b/dev-support/publish_hbase_website.sh @@ -83,12 +83,12 @@ echo "Updating Git" git checkout master git pull -# Generate the site to ~/git/hbase/target/stage +# Generate the site to ~/git/hbase/target/site if [ $INTERACTIVE ]; then read -p "Build the site? (y/n)" yn case $yn in [Yy]* ) - mvn clean package javadoc:aggregate site site:stage -DskipTests + mvn clean package javadoc:aggregate site post-site site:stage -DskipTests status=$? if [ $status -ne 0 ]; then echo "The website does not build. Aborting." @@ -101,7 +101,7 @@ if [ $INTERACTIVE ]; then esac else echo "Building the site in auto mode." - mvn clean package javadoc:aggregate site site:stage -DskipTests + mvn clean package javadoc:aggregate site post-site site:stage -DskipTests status=$? if [ $status != 0 ]; then echo "The website does not build. Aborting." @@ -151,7 +151,7 @@ fi # Delete known auto-generated content from trunk echo "Deleting known auto-generated content from SVN" -rm -rf apidocs devapidocs xref xref-test book book.html java.html +rm -rf apidocs devapidocs xref xref-test book book.html java.html apache_hbase_reference_guide.pdf* # Copy generated site to svn -- cp takes different options on Darwin and GNU echo "Copying the generated site to SVN" @@ -161,18 +161,20 @@ elif [ `uname` == "Linux" ]; then COPYOPTS='-au' fi -cp $COPYOPTS $GIT_DIR/target/site/* . +cp $COPYOPTS $GIT_DIR/target/staging/* . # Look for things we need to fix up in svn echo "Untracked files: svn add" -svn status |grep '?' |sed -e "s/[[:space:]]//g"|cut -d '?' -f 2|while read i - do svn add $i +svn status |grep '?' |sed -e "s/[[:space:]]//g"|cut -d '?' -f 2|while read i; do + svn add $i + echo "Added $i" done echo "Locally deleted files: svn del" -svn status |grep '!' |sed -e "s/[[:space:]]//g"|cut -d '!' -f 2|while read i - do svn del $i +svn status |grep '!' |sed -e "s/[[:space:]]//g"|cut -d '!' -f 2|while read i; do + svn del $i + echo "Deleted $i" done # Display the proposed changes. I filtered out @@ -196,12 +198,12 @@ SVN_NUM_DIFF=`expr $SVN_NEW_NUMFILES - $SVN_OLD_NUMFILES|sed 's/-//g'` # The whole site is only 500 MB so a difference of 10 MB is huge # In this case, we should abort because something is wrong # Leaving this commented out for now until we get some benchmarks -#if [ $SVN_SIZE_DIFF > 10 -o $SVN_NUM_DIFF > 50 ]; then -# echo "This commit would cause the website to change sizes by \ -# $SVN_DIFF MB and $SVN_NUM_DIFF files. There is likely a problem. -# Aborting." 
-# exit 1 -#fi +if [ $SVN_SIZE_DIFF > 10 -o $SVN_NUM_DIFF > 50 ]; then + echo "This commit would cause the website to change sizes by \ + $SVN_DIFF MB and $SVN_NUM_DIFF files. There is likely a problem. + Aborting." + exit 1 +fi if [ $INTERACTIVE ]; then diff --git a/dev-support/test-patch.properties b/dev-support/test-patch.properties index 2995f8e5228..5f5cc320a4d 100644 --- a/dev-support/test-patch.properties +++ b/dev-support/test-patch.properties @@ -13,13 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -MAVEN_OPTS="-Xmx3100M" +MAVEN_OPTS="${MAVEN_OPTS:-"-Xmx3100M"}" # The number of acceptable warning for *all* modules # Please update the per-module test-patch.properties if you update this file. OK_RELEASEAUDIT_WARNINGS=0 -OK_FINDBUGS_WARNINGS=95 # Allow two warnings. Javadoc complains about sun.misc.Unsafe use. See HBASE-7457 OK_JAVADOC_WARNINGS=2 @@ -27,4 +26,8 @@ MAX_LINE_LENGTH=100 # All supported branches for testing with precommit build # branch-1.x should apprear before branch-1 since the latter is a prefix -BRANCH_NAMES="0.94 0.98 branch-1.0 branch-1 master" +BRANCH_NAMES="0.94 0.98 branch-1.0 branch-1 master hbase-12439 hbase-11339" + +# All supported Hadoop versions that we want to test the compilation with +HADOOP2_VERSIONS="2.4.1 2.5.2 2.6.0" +HADOOP3_VERSIONS="3.0.0-SNAPSHOT" diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 0754503528d..cbcc0aad268 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -210,7 +210,7 @@ checkout () { findBranchNameFromPatchName() { local patchName=$1 for LOCAL_BRANCH_NAME in $BRANCH_NAMES; do - if [[ $patchName =~ .*$LOCAL_BRANCH_NAME.* ]]; then + if [[ $patchName =~ /jira/secure/attachment/[0-9]*/.*$LOCAL_BRANCH_NAME ]]; then BRANCH_NAME=$LOCAL_BRANCH_NAME break fi @@ -230,14 +230,42 @@ checkoutBranch() { echo "" if [[ $JENKINS == "true" ]] ; then if [[ "$BRANCH_NAME" != "master" ]]; then - echo "${GIT} checkout ${BRANCH_NAME}" - ${GIT} checkout ${BRANCH_NAME} + echo "origin/${BRANCH_NAME} HEAD is commit `${GIT} rev-list origin/${BRANCH_NAME} -1`" + echo "${GIT} checkout -f `${GIT} rev-list origin/${BRANCH_NAME} -1`" + ${GIT} checkout -f `${GIT} rev-list origin/${BRANCH_NAME} -1` echo "${GIT} status" ${GIT} status fi fi } +############################################################################### +### Collect findbugs reports +collectFindbugsReports() { + name=$1 + basedir=$2 + patch_dir=$3 + for file in $(find $basedir -name findbugsXml.xml) + do + relative_file=${file#$basedir/} # strip leading $basedir prefix + if [ ! 
$relative_file == "target/findbugsXml.xml" ]; then + module_suffix=${relative_file%/target/findbugsXml.xml} # strip trailing path + module_suffix=`basename ${module_suffix}` + fi + + cp $file $patch_dir/${name}FindbugsWarnings${module_suffix}.xml + $FINDBUGS_HOME/bin/setBugDatabaseInfo -name $name \ + $patch_dir/${name}FindbugsWarnings${module_suffix}.xml \ + $patch_dir/${name}FindbugsWarnings${module_suffix}.xml + done + xml_file=$patch_dir/${name}FindbugsWarnings.xml + html_file=$patch_dir/${name}FindbugsWarnings.html + $FINDBUGS_HOME/bin/unionBugs -withMessages \ + -output $xml_file $patch_dir/${name}FindbugsWarnings*.xml + $FINDBUGS_HOME/bin/convertXmlToText -html $xml_file $html_file + file $xml_file $html_file +} + ############################################################################### setup () { ### Download latest patch file (ignoring .htm and .html) when run from patch process @@ -280,9 +308,8 @@ setup () { fi fi ### exit if warnings are NOT defined in the properties file - if [ -z "$OK_FINDBUGS_WARNINGS" ] || [[ -z "$OK_JAVADOC_WARNINGS" ]] || [[ -z $OK_RELEASEAUDIT_WARNINGS ]] ; then + if [[ -z "$OK_JAVADOC_WARNINGS" ]] || [[ -z $OK_RELEASEAUDIT_WARNINGS ]] ; then echo "Please define the following properties in test-patch.properties file" - echo "OK_FINDBUGS_WARNINGS" echo "OK_RELEASEAUDIT_WARNINGS" echo "OK_JAVADOC_WARNINGS" cleanupAndExit 1 @@ -296,10 +323,12 @@ setup () { echo "======================================================================" echo "" echo "" - echo "$MVN clean package checkstyle:checkstyle-aggregate -DskipTests -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavacWarnings.txt 2>&1" + echo "$MVN clean package checkstyle:checkstyle-aggregate findbugs:findbugs -DskipTests \ + -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavacWarnings.txt 2>&1" export MAVEN_OPTS="${MAVEN_OPTS}" # build core and tests - $MVN clean package checkstyle:checkstyle-aggregate -DskipTests -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavacWarnings.txt 2>&1 + $MVN clean package checkstyle:checkstyle-aggregate findbugs:findbugs -DskipTests \ + -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavacWarnings.txt 2>&1 if [[ $? != 0 ]] ; then ERR=`$GREP -A 5 'Compilation failure' $PATCH_DIR/trunkJavacWarnings.txt` echo "Trunk compilation is broken? @@ -307,6 +336,7 @@ setup () { cleanupAndExit 1 fi mv target/checkstyle-result.xml $PATCH_DIR/trunkCheckstyle.xml + collectFindbugsReports trunk $BASEDIR $PATCH_DIR } ############################################################################### @@ -360,6 +390,16 @@ checkTests () { return 0 fi fi + srcReferences=`${GREP} "diff --git" "${PATCH_DIR}/patch" | ${GREP} "src/main" | \ + ${GREP} -v "src/main/asciidoc" | ${GREP} -v "src/main/site" -c` + if [[ $srcReferences == 0 ]] ; then + echo "The patch doesn't appear to alter any code that requires tests." + JIRA_COMMENT="$JIRA_COMMENT + + {color:green}+0 tests included{color}. The patch appears to be a documentation, build, + or dev-support patch that doesn't require tests." + return 0 + fi JIRA_COMMENT="$JIRA_COMMENT {color:red}-1 tests included{color}. The patch doesn't appear to include any new or modified tests. @@ -377,17 +417,21 @@ checkTests () { ### Check there are no compilation errors, passing a file to be parsed. 
checkCompilationErrors() { local file=$1 + hadoopVersion="" + if [ "$#" -ne 1 ]; then + hadoopVersion="with Hadoop version $2" + fi COMPILATION_ERROR=false eval $(awk '/ERROR/ {print "COMPILATION_ERROR=true"}' $file) if $COMPILATION_ERROR ; then ERRORS=$($AWK '/ERROR/ { print $0 }' $file) echo "======================================================================" - echo "There are compilation errors." + echo "There are compilation errors $hadoopVersion." echo "======================================================================" echo "$ERRORS" JIRA_COMMENT="$JIRA_COMMENT - {color:red}-1 javac{color}. The patch appears to cause mvn compile goal to fail. + {color:red}-1 javac{color}. The patch appears to cause mvn compile goal to fail $hadoopVersion. Compilation errors resume: $ERRORS @@ -468,6 +512,29 @@ checkAntiPatterns () { return 0 } +############################################################################### +### Check that there are no incorrect annotations +checkInterfaceAudience () { + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Checking against hadoop InterfaceAudience." + echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + warnings=`$GREP 'import org.apache.hadoop.classification' $PATCH_DIR/patch` + if [[ $warnings != "" ]]; then + JIRA_COMMENT="$JIRA_COMMENT + + {color:red}-1 InterfaceAudience{color}. The patch appears to contain InterfaceAudience from hadoop rather than hbase: + $warnings." + return 1 + fi + return 0 +} + ############################################################################### ### Check there are no javadoc warnings checkJavadocWarnings () { @@ -504,6 +571,31 @@ $JIRA_COMMENT_FOOTER" return 0 } +checkBuildWithHadoopVersions() { + echo "" + echo "" + echo "======================================================================" + echo "======================================================================" + echo " Building with all supported Hadoop versions ." + echo "======================================================================" + echo "======================================================================" + echo "" + echo "" + export MAVEN_OPTS="${MAVEN_OPTS}" + for HADOOP2_VERSION in $HADOOP2_VERSIONS ; do + echo "$MVN clean install -DskipTests -D${PROJECT_NAME}PatchProcess -Dhadoop-two.version=$HADOOP2_VERSION > $PATCH_DIR/patchJavacWithHadoop-$HADOOP2_VERSION.txt 2>&1" + $MVN clean install -DskipTests -D${PROJECT_NAME}PatchProcess -Dhadoop-two.version=$HADOOP2_VERSION > $PATCH_DIR/patchJavacWithHadoop-$HADOOP2_VERSION.txt 2>&1 + checkCompilationErrors $PATCH_DIR/patchJavacWithHadoop-$HADOOP2_VERSION.txt $HADOOP2_VERSION + done + + # TODO: add Hadoop3 versions and compilation here when we get the hadoop.profile=3.0 working + + JIRA_COMMENT="$JIRA_COMMENT + + {color:green}+1 hadoop versions{color}. The patch compiles with all supported hadoop versions ($HADOOP2_VERSIONS)" + return 0 +} + ############################################################################### ### Check there are no changes in the number of Javac warnings checkJavacWarnings () { @@ -596,7 +688,7 @@ checkProtocErrors () { checkProtocCompilationErrors $PATCH_DIR/patchProtocErrors.txt JIRA_COMMENT="$JIRA_COMMENT - {color:green}+1 javac{color}. 
The applied patch does not increase the total number of javac compiler warnings." + {color:green}+1 protoc{color}. The applied patch does not increase the total number of protoc compiler warnings." return 0 } @@ -665,41 +757,36 @@ checkFindbugsWarnings () { {color:red}-1 findbugs{color}. The patch appears to cause Findbugs (version ${findbugs_version}) to fail." return 1 fi - - findbugsWarnings=0 - for file in $(find $BASEDIR -name findbugsXml.xml) - do - relative_file=${file#$BASEDIR/} # strip leading $BASEDIR prefix - if [ ! $relative_file == "target/findbugsXml.xml" ]; then - module_suffix=${relative_file%/target/findbugsXml.xml} # strip trailing path - module_suffix=`basename ${module_suffix}` - fi - - cp $file $PATCH_DIR/patchFindbugsWarnings${module_suffix}.xml - $FINDBUGS_HOME/bin/setBugDatabaseInfo -timestamp "01/01/2000" \ - $PATCH_DIR/patchFindbugsWarnings${module_suffix}.xml \ - $PATCH_DIR/patchFindbugsWarnings${module_suffix}.xml - newFindbugsWarnings=`$FINDBUGS_HOME/bin/filterBugs -first "01/01/2000" $PATCH_DIR/patchFindbugsWarnings${module_suffix}.xml \ - $PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.xml | $AWK '{print $1}'` - echo "Found $newFindbugsWarnings Findbugs warnings ($file)" - findbugsWarnings=$((findbugsWarnings+newFindbugsWarnings)) - echo "$FINDBUGS_HOME/bin/convertXmlToText -html $PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.xml $PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.html" - $FINDBUGS_HOME/bin/convertXmlToText -html $PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.xml $PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.html - file $PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.xml $PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.html - JIRA_COMMENT_FOOTER="Findbugs warnings: $BUILD_URL/artifact/patchprocess/newPatchFindbugsWarnings${module_suffix}.html -$JIRA_COMMENT_FOOTER" - done - ### if current warnings greater than OK_FINDBUGS_WARNINGS - if [[ $findbugsWarnings -gt $OK_FINDBUGS_WARNINGS ]] ; then + collectFindbugsReports patch $BASEDIR $PATCH_DIR + #this files are generated by collectFindbugsReports() named with its first argument + patch_xml=$PATCH_DIR/patchFindbugsWarnings.xml + trunk_xml=$PATCH_DIR/trunkFindbugsWarnings.xml + # combine them to one database + combined_xml=$PATCH_DIR/combinedFindbugsWarnings.xml + new_xml=$PATCH_DIR/newFindbugsWarnings.xml + new_html=$PATCH_DIR/newFindbugsWarnings.html + $FINDBUGS_HOME/bin/computeBugHistory -useAnalysisTimes -withMessages \ + -output $combined_xml $trunk_xml $patch_xml + findbugsWarnings=$($FINDBUGS_HOME/bin/filterBugs -first patch $combined_xml $new_xml) + findbugsFixedWarnings=$($FINDBUGS_HOME/bin/filterBugs -fixed patch $combined_xml $new_xml) + $FINDBUGS_HOME/bin/convertXmlToText -html $new_xml $new_html + file $new_xml $new_html + JIRA_COMMENT_FOOTER="Release Findbugs (version ${findbugs_version}) \ + warnings: $BUILD_URL/artifact/patchprocess/newFindbugsWarnings.html +$JIRA_COMMENT_FOOTER" + ### if current warnings greater than 0, fail + if [[ $findbugsWarnings -gt 0 ]] ; then JIRA_COMMENT="$JIRA_COMMENT - {color:red}-1 findbugs{color}. The patch appears to introduce `expr $(($findbugsWarnings-$OK_FINDBUGS_WARNINGS))` new Findbugs (version ${findbugs_version}) warnings." + {color:red}-1 findbugs{color}. The patch appears to introduce $findbugsWarnings \ + new Findbugs (version ${findbugs_version}) warnings." return 1 fi JIRA_COMMENT="$JIRA_COMMENT - {color:green}+1 findbugs{color}. 
The patch does not introduce any new Findbugs (version ${findbugs_version}) warnings." + {color:green}+1 findbugs{color}. The patch does not introduce any \ + new Findbugs (version ${findbugs_version}) warnings." return 0 } @@ -956,6 +1043,8 @@ fi checkAntiPatterns (( RESULT = RESULT + $? )) +checkBuildWithHadoopVersions +(( RESULT = RESULT + $? )) checkJavacWarnings (( RESULT = RESULT + $? )) checkProtocErrors @@ -964,6 +1053,8 @@ checkJavadocWarnings (( RESULT = RESULT + $? )) checkCheckstyleErrors (( RESULT = RESULT + $? )) +checkInterfaceAudience +(( RESULT = RESULT + $? )) checkFindbugsWarnings (( RESULT = RESULT + $? )) checkReleaseAuditWarnings diff --git a/hbase-annotations/src/main/asciidoc/.gitignore b/hbase-annotations/src/main/asciidoc/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hbase-assembly/src/main/asciidoc/.gitignore b/hbase-assembly/src/main/asciidoc/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hbase-checkstyle/src/main/asciidoc/.gitignore b/hbase-checkstyle/src/main/asciidoc/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml index 29fa4fe0446..84d4162ab8f 100644 --- a/hbase-client/pom.xml +++ b/hbase-client/pom.xml @@ -34,25 +34,6 @@ - - maven-compiler-plugin - - - default-compile - - ${java.default.compiler} - true - - - - default-testCompile - - ${java.default.compiler} - true - - - - org.apache.maven.plugins maven-site-plugin @@ -91,6 +72,36 @@ maven-source-plugin + + + + + org.eclipse.m2e + lifecycle-mapping + 1.0.0 + + + + + + org.apache.maven.plugins + maven-compiler-plugin + [3.2,) + + compile + + + + + + + + + + + + diff --git a/hbase-client/src/main/asciidoc/.gitignore b/hbase-client/src/main/asciidoc/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java index 2791a042992..75fa642bb59 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java @@ -118,6 +118,9 @@ public class ClusterStatus extends VersionedWritable { * @return the names of region servers on the dead list */ public Collection getDeadServerNames() { + if (deadServers == null) { + return Collections.emptyList(); + } return Collections.unmodifiableCollection(deadServers); } @@ -125,14 +128,14 @@ public class ClusterStatus extends VersionedWritable { * @return the number of region servers in the cluster */ public int getServersSize() { - return liveServers.size(); + return liveServers != null ? liveServers.size() : 0; } /** * @return the number of dead region servers in the cluster */ public int getDeadServers() { - return deadServers.size(); + return deadServers != null ? 
deadServers.size() : 0; } /** @@ -148,8 +151,10 @@ public class ClusterStatus extends VersionedWritable { */ public int getRegionsCount() { int count = 0; - for (Map.Entry e: this.liveServers.entrySet()) { - count += e.getValue().getNumberOfRegions(); + if (liveServers != null && !liveServers.isEmpty()) { + for (Map.Entry e: this.liveServers.entrySet()) { + count += e.getValue().getNumberOfRegions(); + } } return count; } @@ -159,8 +164,10 @@ public class ClusterStatus extends VersionedWritable { */ public int getRequestsCount() { int count = 0; - for (Map.Entry e: this.liveServers.entrySet()) { - count += e.getValue().getNumberOfRequests(); + if (liveServers != null && !liveServers.isEmpty()) { + for (Map.Entry e: this.liveServers.entrySet()) { + count += e.getValue().getNumberOfRequests(); + } } return count; } @@ -222,6 +229,9 @@ public class ClusterStatus extends VersionedWritable { } public Collection getServers() { + if (liveServers == null) { + return Collections.emptyList(); + } return Collections.unmodifiableCollection(this.liveServers.keySet()); } @@ -237,13 +247,16 @@ public class ClusterStatus extends VersionedWritable { * @return the number of backup masters in the cluster */ public int getBackupMastersSize() { - return this.backupMasters.size(); + return backupMasters != null ? backupMasters.size() : 0; } /** * @return the names of backup masters */ public Collection getBackupMasters() { + if (backupMasters == null) { + return Collections.emptyList(); + } return Collections.unmodifiableCollection(this.backupMasters); } @@ -252,7 +265,7 @@ public class ClusterStatus extends VersionedWritable { * @return Server's load or null if not found. */ public ServerLoad getLoad(final ServerName sn) { - return this.liveServers.get(sn); + return liveServers != null ? 
liveServers.get(sn) : null; } @InterfaceAudience.Private @@ -303,27 +316,41 @@ public class ClusterStatus extends VersionedWritable { public String toString() { StringBuilder sb = new StringBuilder(1024); sb.append("Master: " + master); - sb.append("\nNumber of backup masters: " + backupMasters.size()); - for (ServerName serverName: backupMasters) { - sb.append("\n " + serverName); + + int backupMastersSize = getBackupMastersSize(); + sb.append("\nNumber of backup masters: " + backupMastersSize); + if (backupMastersSize > 0) { + for (ServerName serverName: backupMasters) { + sb.append("\n " + serverName); + } } - sb.append("\nNumber of live region servers: " + liveServers.size()); - for (ServerName serverName: liveServers.keySet()) { - sb.append("\n " + serverName.getServerName()); + int serversSize = getServersSize(); + sb.append("\nNumber of live region servers: " + serversSize); + if (serversSize > 0) { + for (ServerName serverName: liveServers.keySet()) { + sb.append("\n " + serverName.getServerName()); + } } - sb.append("\nNumber of dead region servers: " + deadServers.size()); - for (ServerName serverName: deadServers) { - sb.append("\n " + serverName); + int deadServerSize = getDeadServers(); + sb.append("\nNumber of dead region servers: " + deadServerSize); + if (deadServerSize > 0) { + for (ServerName serverName: deadServers) { + sb.append("\n " + serverName); + } } sb.append("\nAverage load: " + getAverageLoad()); sb.append("\nNumber of requests: " + getRequestsCount()); sb.append("\nNumber of regions: " + getRegionsCount()); - sb.append("\nNumber of regions in transition: " + intransition.size()); - for (RegionState state: intransition.values()) { - sb.append("\n " + state.toDescriptiveString()); + + int ritSize = (intransition != null) ? intransition.size() : 0; + sb.append("\nNumber of regions in transition: " + ritSize); + if (ritSize > 0) { + for (RegionState state: intransition.values()) { + sb.append("\n " + state.toDescriptiveString()); + } } return sb.toString(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java index 37f1a33ed71..362439af0ae 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java @@ -20,7 +20,7 @@ import java.util.concurrent.ExecutorService; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.HTableInterface; +import org.apache.hadoop.hbase.client.Table; /** * Coprocessor environment state. 
@@ -50,14 +50,14 @@ public interface CoprocessorEnvironment { * @return an interface for accessing the given table * @throws IOException */ - HTableInterface getTable(TableName tableName) throws IOException; + Table getTable(TableName tableName) throws IOException; /** * @return an interface for accessing the given table using the passed executor to run batch * operations * @throws IOException */ - HTableInterface getTable(TableName tableName, ExecutorService service) throws IOException; + Table getTable(TableName tableName, ExecutorService service) throws IOException; /** * @return the classloader for the loaded coprocessor instance diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java index 13f8163a724..589488656c5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -742,7 +742,7 @@ public class HColumnDescriptor implements Comparable { String compressTagsStr = getValue(COMPRESS_TAGS); boolean compressTags = DEFAULT_COMPRESS_TAGS; if (compressTagsStr != null) { - compressTags = Boolean.valueOf(compressTagsStr); + compressTags = Boolean.parseBoolean(compressTagsStr); } return compressTags; } @@ -755,7 +755,7 @@ public class HColumnDescriptor implements Comparable { String compressTagsStr = getValue(COMPRESS_TAGS); boolean compressTags = DEFAULT_COMPRESS_TAGS; if (compressTagsStr != null) { - compressTags = Boolean.valueOf(compressTagsStr); + compressTags = Boolean.parseBoolean(compressTagsStr); } return compressTags; } @@ -786,8 +786,9 @@ public class HColumnDescriptor implements Comparable { */ public boolean isInMemory() { String value = getValue(HConstants.IN_MEMORY); - if (value != null) - return Boolean.valueOf(value).booleanValue(); + if (value != null) { + return Boolean.parseBoolean(value); + } return DEFAULT_IN_MEMORY; } @@ -835,7 +836,7 @@ public class HColumnDescriptor implements Comparable { */ public int getTimeToLive() { String value = getValue(TTL); - return (value != null)? Integer.valueOf(value).intValue(): DEFAULT_TTL; + return (value != null)? Integer.parseInt(value) : DEFAULT_TTL; } /** @@ -851,7 +852,7 @@ public class HColumnDescriptor implements Comparable { */ public int getMinVersions() { String value = getValue(MIN_VERSIONS); - return (value != null)? Integer.valueOf(value).intValue(): 0; + return (value != null)? 
Integer.parseInt(value) : 0; } /** @@ -869,8 +870,9 @@ public class HColumnDescriptor implements Comparable { */ public boolean isBlockCacheEnabled() { String value = getValue(BLOCKCACHE); - if (value != null) - return Boolean.valueOf(value).booleanValue(); + if (value != null) { + return Boolean.parseBoolean(value); + } return DEFAULT_BLOCKCACHE; } @@ -908,7 +910,7 @@ public class HColumnDescriptor implements Comparable { public int getScope() { byte[] value = getValue(REPLICATION_SCOPE_BYTES); if (value != null) { - return Integer.valueOf(Bytes.toString(value)); + return Integer.parseInt(Bytes.toString(value)); } return DEFAULT_REPLICATION_SCOPE; } @@ -974,7 +976,9 @@ public class HColumnDescriptor implements Comparable { private boolean setAndGetBoolean(final String key, final boolean defaultSetting) { String value = getValue(key); - if (value != null) return Boolean.valueOf(value).booleanValue(); + if (value != null) { + return Boolean.parseBoolean(value); + } return defaultSetting; } @@ -1213,7 +1217,7 @@ public class HColumnDescriptor implements Comparable { @Override public int hashCode() { int result = Bytes.hashCode(this.name); - result ^= Byte.valueOf(COLUMN_DESCRIPTOR_VERSION).hashCode(); + result ^= (int) COLUMN_DESCRIPTOR_VERSION; result ^= values.hashCode(); result ^= configuration.hashCode(); return result; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 51352bb7aa4..a0ab48461b9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair; @@ -176,6 +177,14 @@ public class HTableDescriptor implements Comparable { private static final Bytes REGION_REPLICATION_KEY = new Bytes(Bytes.toBytes(REGION_REPLICATION)); + /** + * INTERNAL flag to indicate whether or not the memstore should be replicated + * for read-replicas (CONSISTENCY => TIMELINE). + */ + public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION"; + private static final Bytes REGION_MEMSTORE_REPLICATION_KEY = + new Bytes(Bytes.toBytes(REGION_MEMSTORE_REPLICATION)); + /** Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */ private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT; @@ -210,6 +219,8 @@ public class HTableDescriptor implements Comparable { public static final int DEFAULT_REGION_REPLICATION = 1; + public static final boolean DEFAULT_REGION_MEMSTORE_REPLICATION = true; + private final static Map DEFAULT_VALUES = new HashMap(); private final static Set RESERVED_KEYWORDS @@ -1073,6 +1084,31 @@ public class HTableDescriptor implements Comparable { return this; } + /** + * @return true if the read-replicas memstore replication is enabled. 
+ */ + public boolean hasRegionMemstoreReplication() { + return isSomething(REGION_MEMSTORE_REPLICATION_KEY, DEFAULT_REGION_MEMSTORE_REPLICATION); + } + + /** + * Enable or Disable the memstore replication from the primary region to the replicas. + * The replication will be used only for meta operations (e.g. flush, compaction, ...) + * + * @param memstoreReplication true if the new data written to the primary region + * should be replicated. + * false if the secondaries can tollerate to have new + * data only when the primary flushes the memstore. + */ + public HTableDescriptor setRegionMemstoreReplication(boolean memstoreReplication) { + setValue(REGION_MEMSTORE_REPLICATION_KEY, memstoreReplication ? TRUE : FALSE); + // If the memstore replication is setup, we do not have to wait for observing a flush event + // from primary before starting to serve reads, because gaps from replication is not applicable + setConfiguration(RegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, + Boolean.toString(memstoreReplication)); + return this; + } + /** * Returns all the column family names of the current table. The map of * HTableDescriptor contains mapping of family name to HColumnDescriptors. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 2e6723a64aa..ea29e4f41c9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -17,7 +17,9 @@ */ package org.apache.hadoop.hbase; +import javax.annotation.Nonnull; import javax.annotation.Nullable; +import java.io.Closeable; import java.io.IOException; import java.io.InterruptedIOException; import java.util.ArrayList; @@ -38,12 +40,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Consistency; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; @@ -60,6 +61,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.ExceptionUtil; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PairOfSameType; import org.apache.hadoop.hbase.util.Threads; @@ -118,6 +120,7 @@ public class MetaTableAccessor { */ private static final Log LOG = LogFactory.getLog(MetaTableAccessor.class); + private static final Log METALOG = LogFactory.getLog("org.apache.hadoop.hbase.META"); static final byte [] META_REGION_PREFIX; static { @@ -129,6 +132,37 @@ public class MetaTableAccessor { META_REGION_PREFIX, 0, len); } + /** + * Lists all of the table regions currently in META. + * Deprecated, keep there until some test use this. 
+ * @param connection what we will use + * @param tableName table to list + * @return Map of all user-space regions to servers + * @throws java.io.IOException + * @deprecated use {@link #getTableRegionsAndLocations}, region can have multiple locations + */ + @Deprecated + public static NavigableMap allTableRegions( + Connection connection, final TableName tableName) throws IOException { + final NavigableMap regions = + new TreeMap(); + Visitor visitor = new TableVisitorBase(tableName) { + @Override + public boolean visitInternal(Result result) throws IOException { + RegionLocations locations = getRegionLocations(result); + if (locations == null) return true; + for (HRegionLocation loc : locations.getRegionLocations()) { + if (loc != null) { + HRegionInfo regionInfo = loc.getRegionInfo(); + regions.put(regionInfo, loc.getServerName()); + } + } + return true; + } + }; + scanMetaForTableRegions(connection, visitor, tableName); + return regions; + } @InterfaceAudience.Private public enum QueryType { @@ -167,7 +201,7 @@ public class MetaTableAccessor { public static void fullScanRegions(Connection connection, final Visitor visitor) throws IOException { - fullScan(connection, visitor, null, QueryType.REGION); + scanMeta(connection, null, null, QueryType.REGION, visitor); } /** @@ -189,20 +223,7 @@ public class MetaTableAccessor { public static void fullScanTables(Connection connection, final Visitor visitor) throws IOException { - fullScan(connection, visitor, null, QueryType.TABLE); - } - - /** - * Performs a full scan of hbase:meta. - * @param connection connection we're using - * @param visitor Visitor invoked against each row. - * @param type scanned part of meta - * @throws IOException - */ - public static void fullScan(Connection connection, - final Visitor visitor, QueryType type) - throws IOException { - fullScan(connection, visitor, null, type); + scanMeta(connection, null, null, QueryType.TABLE, visitor); } /** @@ -215,7 +236,7 @@ public class MetaTableAccessor { public static List fullScan(Connection connection, QueryType type) throws IOException { CollectAllVisitor v = new CollectAllVisitor(); - fullScan(connection, v, null, type); + scanMeta(connection, null, null, type, v); return v.getResults(); } @@ -228,21 +249,10 @@ public class MetaTableAccessor { static Table getMetaHTable(final Connection connection) throws IOException { // We used to pass whole CatalogTracker in here, now we just pass in Connection - if (connection == null || connection.isClosed()) { + if (connection == null) { throw new NullPointerException("No connection"); - } - // If the passed in 'connection' is 'managed' -- i.e. every second test uses - // a Table or an HBaseAdmin with managed connections -- then doing - // connection.getTable will throw an exception saying you are NOT to use - // managed connections getting tables. Leaving this as it is for now. Will - // revisit when inclined to change all tests. User code probaby makes use of - // managed connections too so don't change it till post hbase 1.0. - // - // There should still be a way to use this method with an unmanaged connection. 
- if (connection instanceof ClusterConnection) { - if (((ClusterConnection) connection).isManaged()) { - return new HTable(TableName.META_TABLE_NAME, connection); - } + } else if (connection.isClosed()) { + throw new IOException("connection is closed"); } return connection.getTable(TableName.META_TABLE_NAME); } @@ -385,6 +395,28 @@ public class MetaTableAccessor { || getTableState(connection, tableName) != null; } + /** + * Lists all of the regions currently in META. + * + * @param connection to connect with + * @param excludeOfflinedSplitParents False if we are to include offlined/splitparents regions, + * true and we'll leave out offlined regions from returned list + * @return List of all user-space regions. + * @throws IOException + */ + @VisibleForTesting + public static List getAllRegions(Connection connection, + boolean excludeOfflinedSplitParents) + throws IOException { + List> result; + + result = getTableRegionsAndLocations(connection, null, + excludeOfflinedSplitParents); + + return getListOfHRegionInfos(result); + + } + /** * Gets all of the regions of the specified table. Do not use this method * to get meta table regions, use methods in MetaTableLocator instead. @@ -441,15 +473,52 @@ public class MetaTableAccessor { /** * @param tableName table we're working with - * @return Place to start Scan in hbase:meta when passed a - * tableName; returns <tableName&rt; <,&rt; <,&rt; + * @return start row for scanning META according to query type */ - static byte [] getTableStartRowForMeta(TableName tableName) { - byte [] startRow = new byte[tableName.getName().length + 2]; - System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length); - startRow[startRow.length - 2] = HConstants.DELIMITER; - startRow[startRow.length - 1] = HConstants.DELIMITER; - return startRow; + public static byte[] getTableStartRowForMeta(TableName tableName, QueryType type) { + if (tableName == null) { + return null; + } + switch (type) { + case REGION: + byte[] startRow = new byte[tableName.getName().length + 2]; + System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length); + startRow[startRow.length - 2] = HConstants.DELIMITER; + startRow[startRow.length - 1] = HConstants.DELIMITER; + return startRow; + case ALL: + case TABLE: + default: + return tableName.getName(); + } + } + + /** + * @param tableName table we're working with + * @return stop row for scanning META according to query type + */ + public static byte[] getTableStopRowForMeta(TableName tableName, QueryType type) { + if (tableName == null) { + return null; + } + final byte[] stopRow; + switch (type) { + case REGION: + stopRow = new byte[tableName.getName().length + 3]; + System.arraycopy(tableName.getName(), 0, stopRow, 0, tableName.getName().length); + stopRow[stopRow.length - 3] = ' '; + stopRow[stopRow.length - 2] = HConstants.DELIMITER; + stopRow[stopRow.length - 1] = HConstants.DELIMITER; + break; + case ALL: + case TABLE: + default: + stopRow = new byte[tableName.getName().length + 1]; + System.arraycopy(tableName.getName(), 0, stopRow, 0, tableName.getName().length); + stopRow[stopRow.length - 1] = ' '; + break; + } + return stopRow; } /** @@ -461,18 +530,39 @@ public class MetaTableAccessor { * @param tableName bytes of table's name * @return configured Scan object */ - public static Scan getScanForTableName(TableName tableName) { - String strName = tableName.getNameAsString(); + @Deprecated + public static Scan getScanForTableName(Connection connection, TableName tableName) { // Start key is just 
the table name with delimiters - byte[] startKey = Bytes.toBytes(strName + ",,"); + byte[] startKey = getTableStartRowForMeta(tableName, QueryType.REGION); // Stop key appends the smallest possible char to the table name - byte[] stopKey = Bytes.toBytes(strName + " ,,"); + byte[] stopKey = getTableStopRowForMeta(tableName, QueryType.REGION); - Scan scan = new Scan(startKey); + Scan scan = getMetaScan(connection); + scan.setStartRow(startKey); scan.setStopRow(stopKey); return scan; } + private static Scan getMetaScan(Connection connection) { + return getMetaScan(connection, Integer.MAX_VALUE); + } + + private static Scan getMetaScan(Connection connection, int rowUpperLimit) { + Scan scan = new Scan(); + int scannerCaching = connection.getConfiguration() + .getInt(HConstants.HBASE_META_SCANNER_CACHING, + HConstants.DEFAULT_HBASE_META_SCANNER_CACHING); + if (connection.getConfiguration().getBoolean(HConstants.USE_META_REPLICAS, + HConstants.DEFAULT_USE_META_REPLICAS)) { + scan.setConsistency(Consistency.TIMELINE); + } + if (rowUpperLimit <= scannerCaching) { + scan.setSmall(true); + } + int rows = Math.min(rowUpperLimit, scannerCaching); + scan.setCaching(rows); + return scan; + } /** * Do not use this method to get meta table regions, use methods in MetaTableLocator instead. * @param connection connection we're using @@ -489,15 +579,15 @@ public class MetaTableAccessor { /** * Do not use this method to get meta table regions, use methods in MetaTableLocator instead. * @param connection connection we're using - * @param tableName table to work with + * @param tableName table to work with, can be null for getting all regions * @param excludeOfflinedSplitParents don't return split parents * @return Return list of regioninfos and server addresses. * @throws IOException */ public static List> getTableRegionsAndLocations( - Connection connection, final TableName tableName, + Connection connection, @Nullable final TableName tableName, final boolean excludeOfflinedSplitParents) throws IOException { - if (tableName.equals(TableName.META_TABLE_NAME)) { + if (tableName != null && tableName.equals(TableName.META_TABLE_NAME)) { throw new IOException("This method can't be used to locate meta regions;" + " use MetaTableLocator instead"); } @@ -514,7 +604,6 @@ public class MetaTableAccessor { return true; } HRegionInfo hri = current.getRegionLocation().getRegionInfo(); - if (!isInsideTable(hri, tableName)) return false; if (excludeOfflinedSplitParents && hri.isSplitParent()) return true; // Else call super and add this Result to the collection. 
return super.visit(r); @@ -533,7 +622,10 @@ public class MetaTableAccessor { } } }; - fullScan(connection, visitor, getTableStartRowForMeta(tableName), QueryType.REGION); + scanMeta(connection, + getTableStartRowForMeta(tableName, QueryType.REGION), + getTableStopRowForMeta(tableName, QueryType.REGION), + QueryType.REGION, visitor); return visitor.getResults(); } @@ -565,7 +657,7 @@ public class MetaTableAccessor { } } }; - fullScan(connection, v, QueryType.REGION); + scanMeta(connection, null, null, QueryType.REGION, v); return hris; } @@ -591,62 +683,140 @@ public class MetaTableAccessor { return true; } }; - fullScan(connection, v, QueryType.ALL); + scanMeta(connection, null, null, QueryType.ALL, v); + } + + public static void scanMetaForTableRegions(Connection connection, + Visitor visitor, TableName tableName) throws IOException { + scanMeta(connection, tableName, QueryType.REGION, Integer.MAX_VALUE, visitor); + } + + public static void scanMeta(Connection connection, TableName table, + QueryType type, int maxRows, final Visitor visitor) throws IOException { + scanMeta(connection, getTableStartRowForMeta(table, type), getTableStopRowForMeta(table, type), + type, maxRows, visitor); + } + + public static void scanMeta(Connection connection, + @Nullable final byte[] startRow, @Nullable final byte[] stopRow, + QueryType type, final Visitor visitor) throws IOException { + scanMeta(connection, startRow, stopRow, type, Integer.MAX_VALUE, visitor); } /** - * Performs a full scan of a catalog table. + * Performs a scan of META table for given table starting from + * given row. + * * @param connection connection we're using - * @param visitor Visitor invoked against each row. - * @param startrow Where to start the scan. Pass null if want to begin scan - * at first row. - * @param type scanned part of meta - * hbase:meta, the default (pass false to scan hbase:meta) + * @param visitor visitor to call + * @param tableName table withing we scan + * @param row start scan from this row + * @param rowLimit max number of rows to return * @throws IOException */ - public static void fullScan(Connection connection, - final Visitor visitor, @Nullable final byte[] startrow, QueryType type) throws IOException { - fullScan(connection, visitor, startrow, type, false); - } + public static void scanMeta(Connection connection, + final Visitor visitor, final TableName tableName, + final byte[] row, final int rowLimit) + throws IOException { - /** - * Performs a full scan of a catalog table. - * @param connection connection we're using - * @param visitor Visitor invoked against each row. - * @param startrow Where to start the scan. Pass null if want to begin scan - * at first row. 
- * @param type scanned part of meta - * @param raw read raw data including Delete tumbstones - * hbase:meta, the default (pass false to scan hbase:meta) - * @throws IOException - */ - public static void fullScan(Connection connection, - final Visitor visitor, @Nullable final byte[] startrow, QueryType type, boolean raw) - throws IOException { - Scan scan = new Scan(); - scan.setRaw(raw); - if (startrow != null) scan.setStartRow(startrow); - if (startrow == null) { - int caching = connection.getConfiguration() - .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100); - scan.setCaching(caching); + byte[] startRow = null; + byte[] stopRow = null; + if (tableName != null) { + startRow = + getTableStartRowForMeta(tableName, QueryType.REGION); + if (row != null) { + HRegionInfo closestRi = + getClosestRegionInfo(connection, tableName, row); + startRow = HRegionInfo + .createRegionName(tableName, closestRi.getStartKey(), HConstants.ZEROES, false); + } + stopRow = + getTableStopRowForMeta(tableName, QueryType.REGION); } + scanMeta(connection, startRow, stopRow, QueryType.REGION, rowLimit, visitor); + } + + + /** + * Performs a scan of META table. + * @param connection connection we're using + * @param startRow Where to start the scan. Pass null if want to begin scan + * at first row. + * @param stopRow Where to stop the scan. Pass null if want to scan all rows + * from the start one + * @param type scanned part of meta + * @param maxRows maximum rows to return + * @param visitor Visitor invoked against each row. + * @throws IOException + */ + public static void scanMeta(Connection connection, + @Nullable final byte[] startRow, @Nullable final byte[] stopRow, + QueryType type, int maxRows, final Visitor visitor) + throws IOException { + int rowUpperLimit = maxRows > 0 ? maxRows : Integer.MAX_VALUE; + Scan scan = getMetaScan(connection, rowUpperLimit); + for (byte[] family : type.getFamilies()) { scan.addFamily(family); } - Table metaTable = getMetaHTable(connection); - ResultScanner scanner = null; - try { - scanner = metaTable.getScanner(scan); - Result data; - while((data = scanner.next()) != null) { - if (data.isEmpty()) continue; - // Break if visit returns false. - if (!visitor.visit(data)) break; + if (startRow != null) scan.setStartRow(startRow); + if (stopRow != null) scan.setStopRow(stopRow); + + if (LOG.isTraceEnabled()) { + LOG.trace("Scanning META" + + " starting at row=" + Bytes.toStringBinary(startRow) + + " stopping at row=" + Bytes.toStringBinary(stopRow) + + " for max=" + rowUpperLimit + + " with caching=" + scan.getCaching()); + } + + int currentRow = 0; + try (Table metaTable = getMetaHTable(connection)) { + try (ResultScanner scanner = metaTable.getScanner(scan)) { + Result data; + while ((data = scanner.next()) != null) { + if (data.isEmpty()) continue; + // Break if visit returns false. 
+ if (!visitor.visit(data)) break; + if (++currentRow >= rowUpperLimit) break; + } } - } finally { - if (scanner != null) scanner.close(); - metaTable.close(); + } + if (visitor != null && visitor instanceof Closeable) { + try { + ((Closeable) visitor).close(); + } catch (Throwable t) { + ExceptionUtil.rethrowIfInterrupt(t); + LOG.debug("Got exception in closing the meta scanner visitor", t); + } + } + } + + /** + * @return Get closest metatable region row to passed row + * @throws java.io.IOException + */ + @Nonnull + public static HRegionInfo getClosestRegionInfo(Connection connection, + @Nonnull final TableName tableName, + @Nonnull final byte[] row) + throws IOException { + byte[] searchRow = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false); + Scan scan = getMetaScan(connection, 1); + scan.setReversed(true); + scan.setStartRow(searchRow); + try (ResultScanner resultScanner = getMetaHTable(connection).getScanner(scan)) { + Result result = resultScanner.next(); + if (result == null) { + throw new TableNotFoundException("Cannot find row in META " + + " for table: " + tableName + ", row=" + Bytes.toStringBinary(row)); + } + HRegionInfo regionInfo = getHRegionInfo(result); + if (regionInfo == null) { + throw new IOException("HRegionInfo was null or empty in Meta for " + + tableName + ", row=" + Bytes.toStringBinary(row)); + } + return regionInfo; } } @@ -976,6 +1146,12 @@ public class MetaTableAccessor { boolean visit(final Result r) throws IOException; } + /** + * Implementations 'visit' a catalog table row but with close() at the end. + */ + public interface CloseableVisitor extends Visitor, Closeable { + } + /** * A {@link Visitor} that collects content out of passed {@link Result}. */ @@ -1009,6 +1185,59 @@ public class MetaTableAccessor { } } + /** + * A Visitor that skips offline regions and split parents + */ + public static abstract class DefaultVisitorBase implements Visitor { + + public DefaultVisitorBase() { + super(); + } + + public abstract boolean visitInternal(Result rowResult) throws IOException; + + @Override + public boolean visit(Result rowResult) throws IOException { + HRegionInfo info = getHRegionInfo(rowResult); + if (info == null) { + return true; + } + + //skip over offline and split regions + if (!(info.isOffline() || info.isSplit())) { + return visitInternal(rowResult); + } + return true; + } + } + + /** + * A Visitor for a table. Provides a consistent view of the table's + * hbase:meta entries during concurrent splits (see HBASE-5986 for details). This class + * does not guarantee ordered traversal of meta entries, and can block until the + * hbase:meta entries for daughters are available during splits. + */ + public static abstract class TableVisitorBase extends DefaultVisitorBase { + private TableName tableName; + + public TableVisitorBase(TableName tableName) { + super(); + this.tableName = tableName; + } + + @Override + public final boolean visit(Result rowResult) throws IOException { + HRegionInfo info = getHRegionInfo(rowResult); + if (info == null) { + return true; + } + if (!(info.getTable().equals(tableName))) { + return false; + } + return super.visit(rowResult); + } + } + /** * Count regions in hbase:meta for passed table. 
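// A minimal sketch of combining the pieces above, assuming an open Connection 'connection' and a
// TableName 'tableName' already in scope: scanMetaForTableRegions bounds the scan to the table's
// rows, DefaultVisitorBase skips offline and split regions, and TableVisitorBase stops the scan
// once a row for a different table shows up.
final List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
MetaTableAccessor.scanMetaForTableRegions(connection,
    new MetaTableAccessor.TableVisitorBase(tableName) {
      @Override
      public boolean visitInternal(Result rowResult) throws IOException {
        regions.add(MetaTableAccessor.getHRegionInfo(rowResult));
        return true; // keep visiting rows
      }
    }, tableName);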
* @param c Configuration object @@ -1113,6 +1342,9 @@ public class MetaTableAccessor { */ private static void put(final Table t, final Put p) throws IOException { try { + if (METALOG.isDebugEnabled()) { + METALOG.debug(mutationToString(p)); + } t.put(p); } finally { t.close(); @@ -1129,6 +1361,9 @@ public class MetaTableAccessor { throws IOException { Table t = getMetaHTable(connection); try { + if (METALOG.isDebugEnabled()) { + METALOG.debug(mutationsToString(ps)); + } t.put(ps); } finally { t.close(); @@ -1158,6 +1393,9 @@ public class MetaTableAccessor { throws IOException { Table t = getMetaHTable(connection); try { + if (METALOG.isDebugEnabled()) { + METALOG.debug(mutationsToString(deletes)); + } t.delete(deletes); } finally { t.close(); @@ -1202,7 +1440,10 @@ public class MetaTableAccessor { throws IOException { Table t = getMetaHTable(connection); try { - t.batch(mutations); + if (METALOG.isDebugEnabled()) { + METALOG.debug(mutationsToString(mutations)); + } + t.batch(mutations, new Object[mutations.size()]); } catch (InterruptedException e) { InterruptedIOException ie = new InterruptedIOException(e.getMessage()); ie.initCause(e); @@ -1253,6 +1494,9 @@ public class MetaTableAccessor { Put put = makePutFromRegionInfo(regionInfo); addDaughtersToPut(put, splitA, splitB); meta.put(put); + if (METALOG.isDebugEnabled()) { + METALOG.debug(mutationToString(put)); + } if (LOG.isDebugEnabled()) { LOG.debug("Added " + regionInfo.getRegionNameAsString()); } @@ -1464,6 +1708,9 @@ public class MetaTableAccessor { CoprocessorRpcChannel channel = table.coprocessorService(row); MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder = MultiRowMutationProtos.MutateRowsRequest.newBuilder(); + if (METALOG.isDebugEnabled()) { + METALOG.debug(mutationsToString(mutations)); + } for (Mutation mutation : mutations) { if (mutation instanceof Put) { mmrBuilder.addMutationRequest(ProtobufUtil.toMutation( @@ -1659,4 +1906,28 @@ public class MetaTableAccessor { p.addImmutable(getCatalogFamily(), getSeqNumColumn(replicaId), now, null); return p; } + + private static String mutationsToString(Mutation ... 
mutations) throws IOException { + StringBuilder sb = new StringBuilder(); + String prefix = ""; + for (Mutation mutation : mutations) { + sb.append(prefix).append(mutationToString(mutation)); + prefix = ", "; + } + return sb.toString(); + } + + private static String mutationsToString(List mutations) throws IOException { + StringBuilder sb = new StringBuilder(); + String prefix = ""; + for (Mutation mutation : mutations) { + sb.append(prefix).append(mutationToString(mutation)); + prefix = ", "; + } + return sb.toString(); + } + + private static String mutationToString(Mutation p) throws IOException { + return p.getClass().getSimpleName() + p.toJSON(); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java index 794e8b217fa..a6e846eb28c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java @@ -20,9 +20,12 @@ package org.apache.hadoop.hbase; +import java.util.List; + import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Strings; @@ -153,6 +156,13 @@ public class RegionLoad { return regionLoadPB.getCompleteSequenceId(); } + /** + * @return completed sequence id per store. + */ + public List getStoreCompleteSequenceId() { + return regionLoadPB.getStoreCompleteSequenceIdList(); + } + /** * @return the uncompressed size of the storefiles in MB. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java index 2a3156b47a0..211de17f242 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase; import java.util.Collection; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.util.Bytes; /** @@ -200,9 +201,15 @@ public class RegionLocations { // in case of region replication going down, we might have a leak here. int max = other.locations.length; + HRegionInfo regionInfo = null; for (int i = 0; i < max; i++) { HRegionLocation thisLoc = this.getRegionLocation(i); HRegionLocation otherLoc = other.getRegionLocation(i); + if (regionInfo == null && otherLoc != null && otherLoc.getRegionInfo() != null) { + // regionInfo is the first non-null HRI from other RegionLocations. We use it to ensure that + // all replica region infos belong to the same region with same region id. + regionInfo = otherLoc.getRegionInfo(); + } HRegionLocation selectedLoc = selectRegionLocation(thisLoc, otherLoc, true, false); @@ -218,6 +225,18 @@ public class RegionLocations { } } + // ensure that all replicas share the same start code. Otherwise delete them + if (newLocations != null && regionInfo != null) { + for (int i=0; i < newLocations.length; i++) { + if (newLocations[i] != null) { + if (!RegionReplicaUtil.isReplicasForSameRegion(regionInfo, + newLocations[i].getRegionInfo())) { + newLocations[i] = null; + } + } + } + } + return newLocations == null ? 
this : new RegionLocations(newLocations); } @@ -264,6 +283,15 @@ public class RegionLocations { HRegionLocation[] newLocations = new HRegionLocation[Math.max(locations.length, replicaId +1)]; System.arraycopy(locations, 0, newLocations, 0, locations.length); newLocations[replicaId] = location; + // ensure that all replicas share the same start code. Otherwise delete them + for (int i=0; i < newLocations.length; i++) { + if (newLocations[i] != null) { + if (!RegionReplicaUtil.isReplicasForSameRegion(location.getRegionInfo(), + newLocations[i].getRegionInfo())) { + newLocations[i] = null; + } + } + } return new RegionLocations(newLocations); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java index 9141659b514..7813b4aa65c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java @@ -28,8 +28,11 @@ import java.util.TreeSet; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor; +import org.apache.hadoop.hbase.replication.ReplicationLoadSink; +import org.apache.hadoop.hbase.replication.ReplicationLoadSource; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Strings; @@ -52,7 +55,7 @@ public class ServerLoad { private int totalStaticBloomSizeKB = 0; private long totalCompactingKVs = 0; private long currentCompactedKVs = 0; - + public ServerLoad(ClusterStatusProtos.ServerLoad serverLoad) { this.serverLoad = serverLoad; for (ClusterStatusProtos.RegionLoad rl: serverLoad.getRegionLoadsList()) { @@ -70,7 +73,7 @@ public class ServerLoad { totalCompactingKVs += rl.getTotalCompactingKVs(); currentCompactedKVs += rl.getCurrentCompactedKVs(); } - + } // NOTE: Function name cannot start with "get" because then an OpenDataException is thrown because @@ -177,6 +180,26 @@ public class ServerLoad { return serverLoad.getInfoServerPort(); } + /** + * Call directly from client such as hbase shell + * @return the list of ReplicationLoadSource + */ + public List getReplicationLoadSourceList() { + return ProtobufUtil.toReplicationLoadSourceList(serverLoad.getReplLoadSourceList()); + } + + /** + * Call directly from client such as hbase shell + * @return ReplicationLoadSink + */ + public ReplicationLoadSink getReplicationLoadSink() { + if (serverLoad.hasReplLoadSink()) { + return ProtobufUtil.toReplicationLoadSink(serverLoad.getReplLoadSink()); + } else { + return null; + } + } + /** * Originally, this method factored in the effect of requests going to the * server as well. 
However, this does not interact very well with the current diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java index 54c97d7ed95..dc325a3cfe1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java @@ -23,14 +23,12 @@ import java.util.Iterator; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; -import org.apache.hadoop.hbase.util.Bytes; /** * Helper class for custom client scanners. */ @InterfaceAudience.Private public abstract class AbstractClientScanner implements ResultScanner { - protected ScanMetrics scanMetrics; /** @@ -38,14 +36,19 @@ public abstract class AbstractClientScanner implements ResultScanner { */ protected void initScanMetrics(Scan scan) { // check if application wants to collect scan metrics - byte[] enableMetrics = scan.getAttribute( - Scan.SCAN_ATTRIBUTES_METRICS_ENABLE); - if (enableMetrics != null && Bytes.toBoolean(enableMetrics)) { + if (scan.isScanMetricsEnabled()) { scanMetrics = new ScanMetrics(); } } - // TODO: should this be at ResultScanner? ScanMetrics is not public API it seems. + /** + * Used internally accumulating metrics on scan. To + * enable collection of metrics on a Scanner, call {@link Scan#setScanMetricsEnabled(boolean)}. + * These metrics are cleared at key transition points. Metrics are accumulated in the + * {@link Scan} object itself. + * @see Scan#getScanMetrics() + * @return Returns the running {@link ScanMetrics} instance or null if scan metrics not enabled. + */ public ScanMetrics getScanMetrics() { return scanMetrics; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 81e983e2d0b..fcefc401dac 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -702,6 +702,13 @@ public interface Admin extends Abortable, Closeable { */ boolean balancer() throws IOException; + /** + * Query the current state of the balancer + * + * @return true if the balancer is enabled, false otherwise. + */ + boolean isBalancerEnabled() throws IOException; + /** * Enable/Disable the catalog janitor * diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index 1900a250f53..10f23303dbf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java @@ -744,7 +744,7 @@ class AsyncProcess { private final Batch.Callback callback; private final BatchErrors errors; - private final ConnectionManager.ServerErrorTracker errorsByServer; + private final ConnectionImplementation.ServerErrorTracker errorsByServer; private final ExecutorService pool; private final Set> callsInProgress; @@ -1743,8 +1743,8 @@ class AsyncProcess { * We may benefit from connection-wide tracking of server errors. 
* @return ServerErrorTracker to use, null if there is no ServerErrorTracker on this connection */ - protected ConnectionManager.ServerErrorTracker createServerErrorTracker() { - return new ConnectionManager.ServerErrorTracker( + protected ConnectionImplementation.ServerErrorTracker createServerErrorTracker() { + return new ConnectionImplementation.ServerErrorTracker( this.serverTrackerTimeout, this.numTries); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java index 3b91078edfc..4424cece700 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java @@ -60,7 +60,7 @@ import java.util.List; * @see Connection * @since 1.0.0 */ -@InterfaceAudience.Private +@InterfaceAudience.Public @InterfaceStability.Evolving public interface BufferedMutator extends Closeable { /** @@ -122,6 +122,8 @@ public interface BufferedMutator extends Closeable { /** * Listens for asynchronous exceptions on a {@link BufferedMutator}. */ + @InterfaceAudience.Public + @InterfaceStability.Evolving interface ExceptionListener { public void onException(RetriesExhaustedWithDetailsException exception, BufferedMutator mutator) throws RetriesExhaustedWithDetailsException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java index 54e7ccd41a1..249edec3d5f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java @@ -42,7 +42,7 @@ import java.util.concurrent.TimeUnit; * @since 1.0.0 */ @InterfaceAudience.Private -@InterfaceStability.Stable +@InterfaceStability.Evolving public class BufferedMutatorImpl implements BufferedMutator { private static final Log LOG = LogFactory.getLog(BufferedMutatorImpl.class); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java index d31642a88d4..ccd8c2d9033 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java @@ -19,7 +19,10 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import java.io.InterruptedIOException; +import java.util.ArrayList; +import java.util.Arrays; import java.util.LinkedList; +import java.util.List; import java.util.concurrent.ExecutorService; import org.apache.commons.logging.Log; @@ -44,8 +47,6 @@ import org.apache.hadoop.hbase.util.Bytes; import com.google.common.annotations.VisibleForTesting; -import static org.apache.hadoop.hbase.client.ReversedClientScanner.createClosestRowBefore; - /** * Implements the scanner interface for the HBase client. 
* If there are multiple regions in a table, this scanner will iterate @@ -54,6 +55,9 @@ import static org.apache.hadoop.hbase.client.ReversedClientScanner.createClosest @InterfaceAudience.Private public class ClientScanner extends AbstractClientScanner { private final Log LOG = LogFactory.getLog(this.getClass()); + // A byte array in which all elements are the max byte, and it is used to + // construct closest front row + static byte[] MAX_BYTE_ARRAY = Bytes.createMaxByteArray(9); protected Scan scan; protected boolean closed = false; // Current region scanner is against. Gets cleared if current region goes @@ -61,6 +65,18 @@ public class ClientScanner extends AbstractClientScanner { protected HRegionInfo currentRegion = null; protected ScannerCallableWithReplicas callable = null; protected final LinkedList cache = new LinkedList(); + /** + * A list of partial results that have been returned from the server. This list should only + * contain results if this scanner does not have enough partial results to form the complete + * result. + */ + protected final LinkedList partialResults = new LinkedList(); + /** + * The row for which we are accumulating partial Results (i.e. the row of the Results stored + * inside partialResults). Changes to partialResultsRow and partialResults are kept in sync + * via the methods {@link #addToPartialResults(Result)} and {@link #clearPartialResults()} + */ + protected byte[] partialResultsRow = null; protected final int caching; protected long lastNext; // Keep lastResult returned successfully in case we have to reset scanner. @@ -285,7 +301,7 @@ public class ClientScanner extends AbstractClientScanner { return callable.isAnyRPCcancelled(); } - static Result[] call(ScannerCallableWithReplicas callable, + Result[] call(ScannerCallableWithReplicas callable, RpcRetryingCaller caller, int scannerTimeout) throws IOException, RuntimeException { if (Thread.interrupted()) { @@ -317,9 +333,9 @@ public class ClientScanner extends AbstractClientScanner { * machine; for scan/map reduce scenarios, we will have multiple scans running at the same time. * * By default, scan metrics are disabled; if the application wants to collect them, this - * behavior can be turned on by calling calling: - * - * scan.setAttribute(SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE)) + * behavior can be turned on by calling {@link Scan#setScanMetricsEnabled(boolean)} + * + *

This invocation clears the scan metrics. Metrics are aggregated in the Scan instance. */ protected void writeScanMetrics() { if (this.scanMetrics == null || scanMetricsPublished) { @@ -337,112 +353,7 @@ public class ClientScanner extends AbstractClientScanner { return null; } if (cache.size() == 0) { - Result [] values = null; - long remainingResultSize = maxScannerResultSize; - int countdown = this.caching; - // We need to reset it if it's a new callable that was created - // with a countdown in nextScanner - callable.setCaching(this.caching); - // This flag is set when we want to skip the result returned. We do - // this when we reset scanner because it split under us. - boolean retryAfterOutOfOrderException = true; - do { - try { - // Server returns a null values if scanning is to stop. Else, - // returns an empty array if scanning is to go on and we've just - // exhausted current region. - values = call(callable, caller, scannerTimeout); - // When the replica switch happens, we need to do certain operations - // again. The callable will openScanner with the right startkey - // but we need to pick up from there. Bypass the rest of the loop - // and let the catch-up happen in the beginning of the loop as it - // happens for the cases where we see exceptions. Since only openScanner - // would have happened, values would be null - if (values == null && callable.switchedToADifferentReplica()) { - this.currentRegion = callable.getHRegionInfo(); - continue; - } - retryAfterOutOfOrderException = true; - } catch (DoNotRetryIOException e) { - // DNRIOEs are thrown to make us break out of retries. Some types of DNRIOEs want us - // to reset the scanner and come back in again. - if (e instanceof UnknownScannerException) { - long timeout = lastNext + scannerTimeout; - // If we are over the timeout, throw this exception to the client wrapped in - // a ScannerTimeoutException. Else, it's because the region moved and we used the old - // id against the new region server; reset the scanner. - if (timeout < System.currentTimeMillis()) { - long elapsed = System.currentTimeMillis() - lastNext; - ScannerTimeoutException ex = new ScannerTimeoutException( - elapsed + "ms passed since the last invocation, " + - "timeout is currently set to " + scannerTimeout); - ex.initCause(e); - throw ex; - } - } else { - // If exception is any but the list below throw it back to the client; else setup - // the scanner and retry. - Throwable cause = e.getCause(); - if ((cause != null && cause instanceof NotServingRegionException) || - (cause != null && cause instanceof RegionServerStoppedException) || - e instanceof OutOfOrderScannerNextException) { - // Pass - // It is easier writing the if loop test as list of what is allowed rather than - // as a list of what is not allowed... so if in here, it means we do not throw. - } else { - throw e; - } - } - // Else, its signal from depths of ScannerCallable that we need to reset the scanner. - if (this.lastResult != null) { - // The region has moved. We need to open a brand new scanner at - // the new location. - // Reset the startRow to the row we've seen last so that the new - // scanner starts at the correct row. Otherwise we may see previously - // returned rows again. 
- // (ScannerCallable by now has "relocated" the correct region) - if(scan.isReversed()){ - scan.setStartRow(createClosestRowBefore(lastResult.getRow())); - }else { - scan.setStartRow(Bytes.add(lastResult.getRow(), new byte[1])); - } - } - if (e instanceof OutOfOrderScannerNextException) { - if (retryAfterOutOfOrderException) { - retryAfterOutOfOrderException = false; - } else { - // TODO: Why wrap this in a DNRIOE when it already is a DNRIOE? - throw new DoNotRetryIOException("Failed after retry of " + - "OutOfOrderScannerNextException: was there a rpc timeout?", e); - } - } - // Clear region. - this.currentRegion = null; - // Set this to zero so we don't try and do an rpc and close on remote server when - // the exception we got was UnknownScanner or the Server is going down. - callable = null; - // This continue will take us to while at end of loop where we will set up new scanner. - continue; - } - long currentTime = System.currentTimeMillis(); - if (this.scanMetrics != null) { - this.scanMetrics.sumOfMillisSecBetweenNexts.addAndGet(currentTime-lastNext); - } - lastNext = currentTime; - if (values != null && values.length > 0) { - for (Result rs : values) { - cache.add(rs); - // We don't make Iterator here - for (Cell cell : rs.rawCells()) { - remainingResultSize -= CellUtil.estimatedHeapSizeOf(cell); - } - countdown--; - this.lastResult = rs; - } - } - // Values == null means server-side filter has determined we must STOP - } while (remainingResultSize > 0 && countdown > 0 && - possiblyNextScanner(countdown, values == null)); + loadCache(); } if (cache.size() > 0) { @@ -454,6 +365,309 @@ public class ClientScanner extends AbstractClientScanner { return null; } + @VisibleForTesting + public int getCacheSize() { + return cache != null ? cache.size() : 0; + } + + /** + * Contact the servers to load more {@link Result}s in the cache. + */ + protected void loadCache() throws IOException { + Result[] values = null; + long remainingResultSize = maxScannerResultSize; + int countdown = this.caching; + + // We need to reset it if it's a new callable that was created + // with a countdown in nextScanner + callable.setCaching(this.caching); + // This flag is set when we want to skip the result returned. We do + // this when we reset scanner because it split under us. + boolean retryAfterOutOfOrderException = true; + // We don't expect that the server will have more results for us if + // it doesn't tell us otherwise. We rely on the size or count of results + boolean serverHasMoreResults = false; + do { + try { + // Server returns a null values if scanning is to stop. Else, + // returns an empty array if scanning is to go on and we've just + // exhausted current region. + values = call(callable, caller, scannerTimeout); + + // When the replica switch happens, we need to do certain operations + // again. The callable will openScanner with the right startkey + // but we need to pick up from there. Bypass the rest of the loop + // and let the catch-up happen in the beginning of the loop as it + // happens for the cases where we see exceptions. 
Since only openScanner + // would have happened, values would be null + if (values == null && callable.switchedToADifferentReplica()) { + // Any accumulated partial results are no longer valid since the callable will + // openScanner with the correct startkey and we must pick up from there + clearPartialResults(); + this.currentRegion = callable.getHRegionInfo(); + continue; + } + retryAfterOutOfOrderException = true; + } catch (DoNotRetryIOException e) { + // An exception was thrown which makes any partial results that we were collecting + // invalid. The scanner will need to be reset to the beginning of a row. + clearPartialResults(); + + // DNRIOEs are thrown to make us break out of retries. Some types of DNRIOEs want us + // to reset the scanner and come back in again. + if (e instanceof UnknownScannerException) { + long timeout = lastNext + scannerTimeout; + // If we are over the timeout, throw this exception to the client wrapped in + // a ScannerTimeoutException. Else, it's because the region moved and we used the old + // id against the new region server; reset the scanner. + if (timeout < System.currentTimeMillis()) { + long elapsed = System.currentTimeMillis() - lastNext; + ScannerTimeoutException ex = + new ScannerTimeoutException(elapsed + "ms passed since the last invocation, " + + "timeout is currently set to " + scannerTimeout); + ex.initCause(e); + throw ex; + } + } else { + // If exception is any but the list below throw it back to the client; else setup + // the scanner and retry. + Throwable cause = e.getCause(); + if ((cause != null && cause instanceof NotServingRegionException) || + (cause != null && cause instanceof RegionServerStoppedException) || + e instanceof OutOfOrderScannerNextException) { + // Pass + // It is easier writing the if loop test as list of what is allowed rather than + // as a list of what is not allowed... so if in here, it means we do not throw. + } else { + throw e; + } + } + // Else, its signal from depths of ScannerCallable that we need to reset the scanner. + if (this.lastResult != null) { + // The region has moved. We need to open a brand new scanner at + // the new location. + // Reset the startRow to the row we've seen last so that the new + // scanner starts at the correct row. Otherwise we may see previously + // returned rows again. + // (ScannerCallable by now has "relocated" the correct region) + if (scan.isReversed()) { + scan.setStartRow(createClosestRowBefore(lastResult.getRow())); + } else { + scan.setStartRow(Bytes.add(lastResult.getRow(), new byte[1])); + } + } + if (e instanceof OutOfOrderScannerNextException) { + if (retryAfterOutOfOrderException) { + retryAfterOutOfOrderException = false; + } else { + // TODO: Why wrap this in a DNRIOE when it already is a DNRIOE? + throw new DoNotRetryIOException("Failed after retry of " + + "OutOfOrderScannerNextException: was there a rpc timeout?", e); + } + } + // Clear region. + this.currentRegion = null; + // Set this to zero so we don't try and do an rpc and close on remote server when + // the exception we got was UnknownScanner or the Server is going down. + callable = null; + + // This continue will take us to while at end of loop where we will set up new scanner. 
+ continue; + } + long currentTime = System.currentTimeMillis(); + if (this.scanMetrics != null) { + this.scanMetrics.sumOfMillisSecBetweenNexts.addAndGet(currentTime - lastNext); + } + lastNext = currentTime; + // Groom the array of Results that we received back from the server before adding that + // Results to the scanner's cache. If partial results are not allowed to be seen by the + // caller, all book keeping will be performed within this method. + List resultsToAddToCache = getResultsToAddToCache(values); + if (!resultsToAddToCache.isEmpty()) { + for (Result rs : resultsToAddToCache) { + cache.add(rs); + // We don't make Iterator here + for (Cell cell : rs.rawCells()) { + remainingResultSize -= CellUtil.estimatedHeapSizeOf(cell); + } + countdown--; + this.lastResult = rs; + } + } + // We expect that the server won't have more results for us when we exhaust + // the size (bytes or count) of the results returned. If the server *does* inform us that + // there are more results, we want to avoid possiblyNextScanner(...). Only when we actually + // get results is the moreResults context valid. + if (null != values && values.length > 0 && callable.hasMoreResultsContext()) { + // Only adhere to more server results when we don't have any partialResults + // as it keeps the outer loop logic the same. + serverHasMoreResults = callable.getServerHasMoreResults() & partialResults.isEmpty(); + } + // Values == null means server-side filter has determined we must STOP + // !partialResults.isEmpty() means that we are still accumulating partial Results for a + // row. We should not change scanners before we receive all the partial Results for that + // row. + } while (remainingResultSize > 0 && countdown > 0 && !serverHasMoreResults + && (!partialResults.isEmpty() || possiblyNextScanner(countdown, values == null))); + } + + /** + * This method ensures all of our book keeping regarding partial results is kept up to date. This + * method should be called once we know that the results we received back from the RPC request do + * not contain errors. We return a list of results that should be added to the cache. In general, + * this list will contain all NON-partial results from the input array (unless the client has + * specified that they are okay with receiving partial results) + * @return the list of results that should be added to the cache. + * @throws IOException + */ + protected List getResultsToAddToCache(Result[] resultsFromServer) throws IOException { + int resultSize = resultsFromServer != null ? resultsFromServer.length : 0; + List resultsToAddToCache = new ArrayList(resultSize); + + final boolean isBatchSet = scan != null && scan.getBatch() > 0; + final boolean allowPartials = scan != null && scan.getAllowPartialResults(); + + // If the caller has indicated in their scan that they are okay with seeing partial results, + // then simply add all results to the list. Note that since scan batching also returns results + // for a row in pieces we treat batch being set as equivalent to allowing partials. The + // implication of treating batching as equivalent to partial results is that it is possible + // the caller will receive a result back where the number of cells in the result is less than + // the batch size even though it may not be the last group of cells for that row. 
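// Seen from the client, the opt-in described above might look like the sketch below; it assumes a
// Table 'table' is in scope and that Scan exposes a setAllowPartialResults(boolean) setter paired
// with the scan.getAllowPartialResults() call used in this method.
Scan scan = new Scan();
scan.setAllowPartialResults(true); // accept partial Results instead of waiting for whole rows
try (ResultScanner scanner = table.getScanner(scan)) {
  for (Result result : scanner) {
    if (result.isPartial()) {
      // Only some of the row's cells are present; further Results for the same row will follow.
    }
  }
}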
+ if (allowPartials || isBatchSet) { + addResultsToList(resultsToAddToCache, resultsFromServer, 0, resultsFromServer.length); + return resultsToAddToCache; + } + + // If no results were returned it indicates that we have all the partial results necessary + // to construct the complete result. + if (resultsFromServer == null || resultsFromServer.length == 0) { + if (!partialResults.isEmpty()) { + resultsToAddToCache.add(Result.createCompleteResult(partialResults)); + clearPartialResults(); + } + + return resultsToAddToCache; + } + + // In every RPC response there should be at most a single partial result. Furthermore, if + // there is a partial result, it is guaranteed to be in the last position of the array. + Result last = resultsFromServer[resultsFromServer.length - 1]; + Result partial = last.isPartial() ? last : null; + + if (LOG.isTraceEnabled()) { + StringBuilder sb = new StringBuilder(); + sb.append("number results from RPC: ").append(resultsFromServer.length).append(","); + sb.append("partial != null: ").append(partial != null).append(","); + sb.append("number of partials so far: ").append(partialResults.size()); + LOG.trace(sb.toString()); + } + + // There are three possible cases that can occur while handling partial results + // + // 1. (partial != null && partialResults.isEmpty()) + // This is the first partial result that we have received. It should be added to + // the list of partialResults and await the next RPC request at which point another + // portion of the complete result will be received + // + // 2. !partialResults.isEmpty() + // Since our partialResults list is not empty it means that we have been accumulating partial + // Results for a particular row. We cannot form the complete/whole Result for that row until + // all partials for the row have been received. Thus we loop through all of the Results + // returned from the server and determine whether or not all partial Results for the row have + // been received. We know that we have received all of the partial Results for the row when: + // i) We notice a row change in the Results + // ii) We see a Result for the partial row that is NOT marked as a partial Result + // + // 3. (partial == null && partialResults.isEmpty()) + // Business as usual. We are not accumulating partial results and there wasn't a partial result + // in the RPC response. This means that all of the results we received from the server are + // complete and can be added directly to the cache + if (partial != null && partialResults.isEmpty()) { + addToPartialResults(partial); + + // Exclude the last result, it's a partial + addResultsToList(resultsToAddToCache, resultsFromServer, 0, resultsFromServer.length - 1); + } else if (!partialResults.isEmpty()) { + for (int i = 0; i < resultsFromServer.length; i++) { + Result result = resultsFromServer[i]; + + // This result is from the same row as the partial Results. Add it to the list of partials + // and check if it was the last partial Result for that row + if (Bytes.equals(partialResultsRow, result.getRow())) { + addToPartialResults(result); + + // If the result is not a partial, it is a signal to us that it is the last Result we + // need to form the complete Result client-side + if (!result.isPartial()) { + resultsToAddToCache.add(Result.createCompleteResult(partialResults)); + clearPartialResults(); + } + } else { + // The row of this result differs from the row of the partial results we have received so + // far.
If our list of partials isn't empty, this is a signal to form the complete Result + // since the row has now changed + if (!partialResults.isEmpty()) { + resultsToAddToCache.add(Result.createCompleteResult(partialResults)); + clearPartialResults(); + } + + // It's possible that in one response from the server we receive the final partial for + // one row and receive a partial for a different row. Thus, make sure that all Results + // are added to the proper list + if (result.isPartial()) { + addToPartialResults(result); + } else { + resultsToAddToCache.add(result); + } + } + } + } else { // partial == null && partialResults.isEmpty() -- business as usual + addResultsToList(resultsToAddToCache, resultsFromServer, 0, resultsFromServer.length); + } + + return resultsToAddToCache; + } + + /** + * A convenience method for adding a Result to our list of partials. This method ensure that only + * Results that belong to the same row as the other partials can be added to the list. + * @param result The result that we want to add to our list of partial Results + * @throws IOException + */ + private void addToPartialResults(final Result result) throws IOException { + final byte[] row = result.getRow(); + if (partialResultsRow != null && !Bytes.equals(row, partialResultsRow)) { + throw new IOException("Partial result row does not match. All partial results must come " + + "from the same row. partialResultsRow: " + Bytes.toString(partialResultsRow) + "row: " + + Bytes.toString(row)); + } + partialResultsRow = row; + partialResults.add(result); + } + + /** + * Convenience method for clearing the list of partials and resetting the partialResultsRow. + */ + private void clearPartialResults() { + partialResults.clear(); + partialResultsRow = null; + } + + /** + * Helper method for adding results between the indices [start, end) to the outputList + * @param outputList the list that results will be added to + * @param inputArray the array that results are taken from + * @param start beginning index (inclusive) + * @param end ending index (exclusive) + */ + private void addResultsToList(List outputList, Result[] inputArray, int start, int end) { + if (inputArray == null || start < 0 || end > inputArray.length) return; + + for (int i = start; i < end; i++) { + outputList.add(inputArray[i]); + } + } + @Override public void close() { if (!scanMetricsPublished) writeScanMetrics(); @@ -476,4 +690,26 @@ public class ClientScanner extends AbstractClientScanner { } closed = true; } + + /** + * Create the closest row before the specified row + * @param row + * @return a new byte array which is the closest front row of the specified one + */ + protected static byte[] createClosestRowBefore(byte[] row) { + if (row == null) { + throw new IllegalArgumentException("The passed row is empty"); + } + if (Bytes.equals(row, HConstants.EMPTY_BYTE_ARRAY)) { + return MAX_BYTE_ARRAY; + } + if (row[row.length - 1] == 0) { + return Arrays.copyOf(row, row.length - 1); + } else { + byte[] closestFrontRow = Arrays.copyOf(row, row.length); + closestFrontRow[row.length - 1] = (byte) ((closestFrontRow[row.length - 1] & 0xff) - 1); + closestFrontRow = Bytes.add(closestFrontRow, MAX_BYTE_ARRAY); + return closestFrontRow; + } + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java index 35b3d88256c..28502dc0244 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java @@ -31,9 +31,13 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.ClientSmallScanner.SmallScannerCallable; +import org.apache.hadoop.hbase.client.ClientSmallScanner.SmallScannerCallableFactory; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.util.Bytes; +import com.google.common.annotations.VisibleForTesting; + /** * Client scanner for small reversed scan. Generally, only one RPC is called to fetch the * scan results, unless the results cross multiple regions or the row count of @@ -45,34 +49,83 @@ import org.apache.hadoop.hbase.util.Bytes; public class ClientSmallReversedScanner extends ReversedClientScanner { private static final Log LOG = LogFactory.getLog(ClientSmallReversedScanner.class); private ScannerCallableWithReplicas smallScanCallable = null; - private byte[] skipRowOfFirstResult = null; + private SmallScannerCallableFactory callableFactory; /** - * Create a new ReversibleClientScanner for the specified table Note that the - * passed {@link Scan}'s start row maybe changed changed. + * Create a new ReversibleClientScanner for the specified table. Take note that the passed + * {@link Scan} 's start row maybe changed changed. * - * @param conf The {@link Configuration} to use. - * @param scan {@link Scan} to use in this scanner - * @param tableName The table that we wish to rangeGet - * @param connection Connection identifying the cluster + * @param conf + * The {@link Configuration} to use. + * @param scan + * {@link Scan} to use in this scanner + * @param tableName + * The table that we wish to rangeGet + * @param connection + * Connection identifying the cluster * @param rpcFactory + * Factory used to create the {@link RpcRetryingCaller} + * @param controllerFactory + * Factory used to access RPC payloads + * @param pool + * Threadpool for RPC threads + * @param primaryOperationTimeout + * Call timeout * @throws IOException + * If the remote call fails */ public ClientSmallReversedScanner(final Configuration conf, final Scan scan, - final TableName tableName, ClusterConnection connection, - RpcRetryingCallerFactory rpcFactory, RpcControllerFactory controllerFactory, - ExecutorService pool, int primaryOperationTimeout) throws IOException { - super(conf, scan, tableName, connection, rpcFactory, controllerFactory, pool, - primaryOperationTimeout); + final TableName tableName, ClusterConnection connection, RpcRetryingCallerFactory rpcFactory, + RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout) + throws IOException { + this(conf, scan, tableName, connection, rpcFactory, controllerFactory, pool, + primaryOperationTimeout, new SmallScannerCallableFactory()); } /** - * Gets a scanner for following scan. Move to next region or continue from the - * last result or start from the start row. + * Create a new ReversibleClientScanner for the specified table. Take note that the passed + * {@link Scan}'s start row may be changed. + * + * @param conf + * The {@link Configuration} to use. 
+ * @param scan + * {@link Scan} to use in this scanner + * @param tableName + * The table that we wish to rangeGet + * @param connection + * Connection identifying the cluster + * @param rpcFactory + * Factory used to create the {@link RpcRetryingCaller} + * @param controllerFactory + * Factory used to access RPC payloads + * @param pool + * Threadpool for RPC threads + * @param primaryOperationTimeout + * Call timeout + * @param callableFactory + * Factory used to create the {@link SmallScannerCallable} + * @throws IOException + * If the remote call fails + */ + @VisibleForTesting + ClientSmallReversedScanner(final Configuration conf, final Scan scan, final TableName tableName, + ClusterConnection connection, RpcRetryingCallerFactory rpcFactory, + RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout, + SmallScannerCallableFactory callableFactory) throws IOException { + super(conf, scan, tableName, connection, rpcFactory, controllerFactory, pool, + primaryOperationTimeout); + this.callableFactory = callableFactory; + } + + /** + * Gets a scanner for following scan. Move to next region or continue from the last result or + * start from the start row. * * @param nbRows - * @param done true if Server-side says we're done scanning. - * @param currentRegionDone true if scan is over on current region + * @param done + * true if Server-side says we're done scanning. + * @param currentRegionDone + * true if scan is over on current region * @return true if has next scanner * @throws IOException */ @@ -81,7 +134,7 @@ public class ClientSmallReversedScanner extends ReversedClientScanner { // Where to start the next getter byte[] localStartKey; int cacheNum = nbRows; - skipRowOfFirstResult = null; + boolean regionChanged = true; // if we're at end of table, close and return false to stop iterating if (this.currentRegion != null && currentRegionDone) { byte[] startKey = this.currentRegion.getStartKey(); @@ -100,9 +153,8 @@ public class ClientSmallReversedScanner extends ReversedClientScanner { LOG.debug("Finished with region " + this.currentRegion); } } else if (this.lastResult != null) { - localStartKey = this.lastResult.getRow(); - skipRowOfFirstResult = this.lastResult.getRow(); - cacheNum++; + regionChanged = false; + localStartKey = createClosestRowBefore(lastResult.getRow()); } else { localStartKey = this.scan.getStartRow(); } @@ -112,12 +164,11 @@ public class ClientSmallReversedScanner extends ReversedClientScanner { + Bytes.toStringBinary(localStartKey) + "'"); } - smallScanCallable = ClientSmallScanner.getSmallScanCallable( - getConnection(), getTable(), scan, getScanMetrics(), localStartKey, cacheNum, - rpcControllerFactory, getPool(), getPrimaryOperationTimeout(), - getRetries(), getScannerTimeout(), getConf(), caller); + smallScanCallable = callableFactory.getCallable(getConnection(), getTable(), scan, + getScanMetrics(), localStartKey, cacheNum, rpcControllerFactory, getPool(), + getPrimaryOperationTimeout(), getRetries(), getScannerTimeout(), getConf(), caller); - if (this.scanMetrics != null && skipRowOfFirstResult == null) { + if (this.scanMetrics != null && regionChanged) { this.scanMetrics.countOfRegions.incrementAndGet(); } return true; @@ -131,45 +182,7 @@ public class ClientSmallReversedScanner extends ReversedClientScanner { return null; } if (cache.size() == 0) { - Result[] values = null; - long remainingResultSize = maxScannerResultSize; - int countdown = this.caching; - boolean currentRegionDone = false; - // Values == null means 
server-side filter has determined we must STOP - while (remainingResultSize > 0 && countdown > 0 - && nextScanner(countdown, values == null, currentRegionDone)) { - // Server returns a null values if scanning is to stop. Else, - // returns an empty array if scanning is to go on and we've just - // exhausted current region. - // callWithoutRetries is at this layer. Within the ScannerCallableWithReplicas, - // we do a callWithRetries - values = this.caller.callWithoutRetries(smallScanCallable, scannerTimeout); - this.currentRegion = smallScanCallable.getHRegionInfo(); - long currentTime = System.currentTimeMillis(); - if (this.scanMetrics != null) { - this.scanMetrics.sumOfMillisSecBetweenNexts.addAndGet(currentTime - - lastNext); - } - lastNext = currentTime; - if (values != null && values.length > 0) { - for (int i = 0; i < values.length; i++) { - Result rs = values[i]; - if (i == 0 && this.skipRowOfFirstResult != null - && Bytes.equals(skipRowOfFirstResult, rs.getRow())) { - // Skip the first result - continue; - } - cache.add(rs); - // We don't make Iterator here - for (Cell cell : rs.rawCells()) { - remainingResultSize -= CellUtil.estimatedHeapSizeOf(cell); - } - countdown--; - this.lastResult = rs; - } - } - currentRegionDone = countdown > 0; - } + loadCache(); } if (cache.size() > 0) { @@ -181,6 +194,47 @@ public class ClientSmallReversedScanner extends ReversedClientScanner { return null; } + @Override + protected void loadCache() throws IOException { + Result[] values = null; + long remainingResultSize = maxScannerResultSize; + int countdown = this.caching; + boolean currentRegionDone = false; + // Values == null means server-side filter has determined we must STOP + while (remainingResultSize > 0 && countdown > 0 + && nextScanner(countdown, values == null, currentRegionDone)) { + // Server returns a null values if scanning is to stop. Else, + // returns an empty array if scanning is to go on and we've just + // exhausted current region. + // callWithoutRetries is at this layer. 
Within the ScannerCallableWithReplicas, + // we do a callWithRetries + values = this.caller.callWithoutRetries(smallScanCallable, scannerTimeout); + this.currentRegion = smallScanCallable.getHRegionInfo(); + long currentTime = System.currentTimeMillis(); + if (this.scanMetrics != null) { + this.scanMetrics.sumOfMillisSecBetweenNexts.addAndGet(currentTime + - lastNext); + } + lastNext = currentTime; + if (values != null && values.length > 0) { + for (int i = 0; i < values.length; i++) { + Result rs = values[i]; + cache.add(rs); + // We don't make Iterator here + for (Cell cell : rs.rawCells()) { + remainingResultSize -= CellUtil.estimatedHeapSizeOf(cell); + } + countdown--; + this.lastResult = rs; + } + } + if (smallScanCallable.hasMoreResultsContext()) { + currentRegionDone = !smallScanCallable.getServerHasMoreResults(); + } else { + currentRegionDone = countdown > 0; + } + } + } @Override protected void initializeScannerInConstruction() throws IOException { @@ -194,4 +248,8 @@ public class ClientSmallReversedScanner extends ReversedClientScanner { closed = true; } + @VisibleForTesting + protected void setScannerCallableFactory(SmallScannerCallableFactory callableFactory) { + this.callableFactory = callableFactory; + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java index 9fc9cc68041..77321b012db 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.util.Bytes; +import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ServiceException; /** @@ -52,29 +53,72 @@ import com.google.protobuf.ServiceException; public class ClientSmallScanner extends ClientScanner { private final Log LOG = LogFactory.getLog(this.getClass()); private ScannerCallableWithReplicas smallScanCallable = null; - // When fetching results from server, skip the first result if it has the same - // row with this one - private byte[] skipRowOfFirstResult = null; + private SmallScannerCallableFactory callableFactory; /** - * Create a new ShortClientScanner for the specified table Note that the - * passed {@link Scan}'s start row maybe changed changed. + * Create a new ShortClientScanner for the specified table. Take note that the passed {@link Scan} + * 's start row maybe changed changed. * - * @param conf The {@link Configuration} to use. - * @param scan {@link Scan} to use in this scanner - * @param tableName The table that we wish to rangeGet - * @param connection Connection identifying the cluster + * @param conf + * The {@link Configuration} to use. 
+ * @param scan + * {@link Scan} to use in this scanner + * @param tableName + * The table that we wish to rangeGet + * @param connection + * Connection identifying the cluster * @param rpcFactory + * Factory used to create the {@link RpcRetryingCaller} + * @param controllerFactory + * Factory used to access RPC payloads * @param pool + * Threadpool for RPC threads * @param primaryOperationTimeout + * Call timeout + * @throws IOException + * If the remote call fails + */ + public ClientSmallScanner(final Configuration conf, final Scan scan, final TableName tableName, + ClusterConnection connection, RpcRetryingCallerFactory rpcFactory, + RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout) + throws IOException { + this(conf, scan, tableName, connection, rpcFactory, controllerFactory, pool, + primaryOperationTimeout, new SmallScannerCallableFactory()); + } + + /** + * Create a new ShortClientScanner for the specified table. Take note that the passed {@link Scan} + * 's start row maybe changed changed. Intended for unit tests to provide their own + * {@link SmallScannerCallableFactory} implementation/mock. + * + * @param conf + * The {@link Configuration} to use. + * @param scan + * {@link Scan} to use in this scanner + * @param tableName + * The table that we wish to rangeGet + * @param connection + * Connection identifying the cluster + * @param rpcFactory + * Factory used to create the {@link RpcRetryingCaller} + * @param controllerFactory + * Factory used to access RPC payloads + * @param pool + * Threadpool for RPC threads + * @param primaryOperationTimeout + * Call timeout + * @param callableFactory + * Factory used to create the {@link SmallScannerCallable} * @throws IOException */ - public ClientSmallScanner(final Configuration conf, final Scan scan, - final TableName tableName, ClusterConnection connection, - RpcRetryingCallerFactory rpcFactory, RpcControllerFactory controllerFactory, - ExecutorService pool, int primaryOperationTimeout) throws IOException { + @VisibleForTesting + ClientSmallScanner(final Configuration conf, final Scan scan, final TableName tableName, + ClusterConnection connection, RpcRetryingCallerFactory rpcFactory, + RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout, + SmallScannerCallableFactory callableFactory) throws IOException { super(conf, scan, tableName, connection, rpcFactory, controllerFactory, pool, - primaryOperationTimeout); + primaryOperationTimeout); + this.callableFactory = callableFactory; } @Override @@ -97,26 +141,25 @@ public class ClientSmallScanner extends ClientScanner { // Where to start the next getter byte[] localStartKey; int cacheNum = nbRows; - skipRowOfFirstResult = null; + boolean regionChanged = true; // if we're at end of table, close and return false to stop iterating if (this.currentRegion != null && currentRegionDone) { byte[] endKey = this.currentRegion.getEndKey(); if (endKey == null || Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY) || checkScanStopRow(endKey) || done) { close(); - if (LOG.isDebugEnabled()) { - LOG.debug("Finished with small scan at " + this.currentRegion); + if (LOG.isTraceEnabled()) { + LOG.trace("Finished with small scan at " + this.currentRegion); } return false; } localStartKey = endKey; - if (LOG.isDebugEnabled()) { - LOG.debug("Finished with region " + this.currentRegion); + if (LOG.isTraceEnabled()) { + LOG.trace("Finished with region " + this.currentRegion); } } else if (this.lastResult != null) { - localStartKey = 
this.lastResult.getRow(); - skipRowOfFirstResult = this.lastResult.getRow(); - cacheNum++; + regionChanged = false; + localStartKey = Bytes.add(lastResult.getRow(), new byte[1]); } else { localStartKey = this.scan.getStartRow(); } @@ -125,32 +168,15 @@ public class ClientSmallScanner extends ClientScanner { LOG.trace("Advancing internal small scanner to startKey at '" + Bytes.toStringBinary(localStartKey) + "'"); } - smallScanCallable = getSmallScanCallable( - getConnection(), getTable(), scan, getScanMetrics(), localStartKey, cacheNum, - rpcControllerFactory, getPool(), getPrimaryOperationTimeout(), - getRetries(), getScannerTimeout(), getConf(), caller); - if (this.scanMetrics != null && skipRowOfFirstResult == null) { + smallScanCallable = callableFactory.getCallable(getConnection(), getTable(), scan, + getScanMetrics(), localStartKey, cacheNum, rpcControllerFactory, getPool(), + getPrimaryOperationTimeout(), getRetries(), getScannerTimeout(), getConf(), caller); + if (this.scanMetrics != null && regionChanged) { this.scanMetrics.countOfRegions.incrementAndGet(); } return true; } - - static ScannerCallableWithReplicas getSmallScanCallable( - ClusterConnection connection, TableName table, Scan scan, - ScanMetrics scanMetrics, byte[] localStartKey, final int cacheNum, - RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout, - int retries, int scannerTimeout, Configuration conf, RpcRetryingCaller caller) { - scan.setStartRow(localStartKey); - SmallScannerCallable s = new SmallScannerCallable( - connection, table, scan, scanMetrics, controllerFactory, cacheNum, 0); - ScannerCallableWithReplicas scannerCallableWithReplicas = - new ScannerCallableWithReplicas(table, connection, - s, pool, primaryOperationTimeout, scan, retries, - scannerTimeout, cacheNum, conf, caller); - return scannerCallableWithReplicas; - } - static class SmallScannerCallable extends ScannerCallable { public SmallScannerCallable( ClusterConnection connection, TableName table, Scan scan, @@ -173,8 +199,15 @@ public class ClientSmallScanner extends ClientScanner { controller.setPriority(getTableName()); controller.setCallTimeout(timeout); response = getStub().scan(controller, request); - return ResponseConverter.getResults(controller.cellScanner(), + Result[] results = ResponseConverter.getResults(controller.cellScanner(), response); + if (response.hasMoreResultsInRegion()) { + setHasMoreResultsContext(true); + setServerHasMoreResults(response.getMoreResultsInRegion()); + } else { + setHasMoreResultsContext(false); + } + return results; } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); } @@ -195,45 +228,7 @@ public class ClientSmallScanner extends ClientScanner { return null; } if (cache.size() == 0) { - Result[] values = null; - long remainingResultSize = maxScannerResultSize; - int countdown = this.caching; - boolean currentRegionDone = false; - // Values == null means server-side filter has determined we must STOP - while (remainingResultSize > 0 && countdown > 0 - && nextScanner(countdown, values == null, currentRegionDone)) { - // Server returns a null values if scanning is to stop. Else, - // returns an empty array if scanning is to go on and we've just - // exhausted current region. - // callWithoutRetries is at this layer. 
Within the ScannerCallableWithReplicas, - // we do a callWithRetries - values = this.caller.callWithoutRetries(smallScanCallable, scannerTimeout); - this.currentRegion = smallScanCallable.getHRegionInfo(); - long currentTime = System.currentTimeMillis(); - if (this.scanMetrics != null) { - this.scanMetrics.sumOfMillisSecBetweenNexts.addAndGet(currentTime - - lastNext); - } - lastNext = currentTime; - if (values != null && values.length > 0) { - for (int i = 0; i < values.length; i++) { - Result rs = values[i]; - if (i == 0 && this.skipRowOfFirstResult != null - && Bytes.equals(skipRowOfFirstResult, rs.getRow())) { - // Skip the first result - continue; - } - cache.add(rs); - // We don't make Iterator here - for (Cell cell : rs.rawCells()) { - remainingResultSize -= CellUtil.estimatedHeapSizeOf(cell); - } - countdown--; - this.lastResult = rs; - } - } - currentRegionDone = countdown > 0; - } + loadCache(); } if (cache.size() > 0) { @@ -246,8 +241,75 @@ public class ClientSmallScanner extends ClientScanner { } @Override + protected void loadCache() throws IOException { + Result[] values = null; + long remainingResultSize = maxScannerResultSize; + int countdown = this.caching; + boolean currentRegionDone = false; + // Values == null means server-side filter has determined we must STOP + while (remainingResultSize > 0 && countdown > 0 + && nextScanner(countdown, values == null, currentRegionDone)) { + // Server returns a null values if scanning is to stop. Else, + // returns an empty array if scanning is to go on and we've just + // exhausted current region. + // callWithoutRetries is at this layer. Within the ScannerCallableWithReplicas, + // we do a callWithRetries + values = this.caller.callWithoutRetries(smallScanCallable, scannerTimeout); + this.currentRegion = smallScanCallable.getHRegionInfo(); + long currentTime = System.currentTimeMillis(); + if (this.scanMetrics != null) { + this.scanMetrics.sumOfMillisSecBetweenNexts.addAndGet(currentTime + - lastNext); + } + lastNext = currentTime; + if (values != null && values.length > 0) { + for (int i = 0; i < values.length; i++) { + Result rs = values[i]; + cache.add(rs); + // We don't make Iterator here + for (Cell cell : rs.rawCells()) { + remainingResultSize -= CellUtil.estimatedHeapSizeOf(cell); + } + countdown--; + this.lastResult = rs; + } + } + if (smallScanCallable.hasMoreResultsContext()) { + // If the server has more results, the current region is not done + currentRegionDone = !smallScanCallable.getServerHasMoreResults(); + } else { + // not guaranteed to get the context in older versions, fall back to checking countdown + currentRegionDone = countdown > 0; + } + } + } + public void close() { if (!scanMetricsPublished) writeScanMetrics(); closed = true; } + + @VisibleForTesting + protected void setScannerCallableFactory(SmallScannerCallableFactory callableFactory) { + this.callableFactory = callableFactory; + } + + @InterfaceAudience.Private + protected static class SmallScannerCallableFactory { + + public ScannerCallableWithReplicas getCallable(ClusterConnection connection, TableName table, + Scan scan, ScanMetrics scanMetrics, byte[] localStartKey, int cacheNum, + RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout, + int retries, int scannerTimeout, Configuration conf, RpcRetryingCaller caller) { + scan.setStartRow(localStartKey); + SmallScannerCallable s = new SmallScannerCallable( + connection, table, scan, scanMetrics, controllerFactory, cacheNum, 0); + ScannerCallableWithReplicas 
scannerCallableWithReplicas = + new ScannerCallableWithReplicas(table, connection, + s, pool, primaryOperationTimeout, scan, retries, + scannerTimeout, cacheNum, conf, caller); + return scannerCallableWithReplicas; + } + + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java index f0398f9671c..07b055a6b98 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java @@ -287,12 +287,6 @@ public interface ClusterConnection extends HConnection { */ RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf); - /** - * - * @return true if this is a managed connection. - */ - boolean isManaged(); - /** * @return the current statistics tracker associated with this connection */ @@ -302,4 +296,4 @@ public interface ClusterConnection extends HConnection { * @return the configured client backoff policy */ ClientBackoffPolicy getBackoffPolicy(); -} \ No newline at end of file +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java index 72f870fb4b3..dab4905ce80 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java @@ -74,10 +74,13 @@ public interface Connection extends Abortable, Closeable { * The returned Table is not thread safe, a new instance should be created for each using thread. * This is a lightweight operation, pooling or caching of the returned Table * is neither required nor desired. - *
+   * <p>
    * The caller is responsible for calling {@link Table#close()} on the returned
    * table instance.
-   *
+   * <p>
+ * Since 0.98.1 this method no longer checks table existence. An exception + * will be thrown if the table does not exist only when the first operation is + * attempted. * @param tableName the name of the table * @return a Table to use for interactions with this table */ @@ -88,9 +91,13 @@ public interface Connection extends Abortable, Closeable { * The returned Table is not thread safe, a new instance should be created for each using thread. * This is a lightweight operation, pooling or caching of the returned Table * is neither required nor desired. - *
+   * <p>
    * The caller is responsible for calling {@link Table#close()} on the returned
    * table instance.
+   * <p>
+ * Since 0.98.1 this method no longer checks table existence. An exception + * will be thrown if the table does not exist only when the first operation is + * attempted. * * @param tableName the name of the table * @param pool The thread pool to use for batch operations, null to use a default pool. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java index e1458b83d33..1d8a7939a6b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; * A convenience to override when customizing method implementations. * * - * @see ConnectionUtils#createShortCircuitHConnection(HConnection, ServerName, + * @see ConnectionUtils#createShortCircuitHConnection(Connection, ServerName, * AdminService.BlockingInterface, ClientService.BlockingInterface) for case where we make * Connections skip RPC if request is to local server. */ @@ -455,11 +455,6 @@ abstract class ConnectionAdapter implements ClusterConnection { return wrappedConnection.getNewRpcRetryingCallerFactory(conf); } - @Override - public boolean isManaged() { - return wrappedConnection.isManaged(); - } - @Override public ServerStatisticTracker getStatisticsTracker() { return wrappedConnection.getStatisticsTracker(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java index 89378ddebca..3e8ca31f1e3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java @@ -50,7 +50,6 @@ import org.apache.hadoop.hbase.security.UserProvider; * Similarly, {@link Connection} also returns {@link Admin} and {@link RegionLocator} * implementations. * - * This class replaces {@link HConnectionManager}, which is now deprecated. * @see Connection * @since 0.99.0 */ @@ -215,15 +214,9 @@ public class ConnectionFactory { user = provider.getCurrent(); } - return createConnection(conf, false, pool, user); - } - - static Connection createConnection(final Configuration conf, final boolean managed, - final ExecutorService pool, final User user) - throws IOException { String className = conf.get(HConnection.HBASE_CLIENT_CONNECTION_IMPL, - ConnectionManager.HConnectionImplementation.class.getName()); - Class clazz = null; + ConnectionImplementation.class.getName()); + Class clazz; try { clazz = Class.forName(className); } catch (ClassNotFoundException e) { @@ -233,9 +226,9 @@ public class ConnectionFactory { // Default HCM#HCI is not accessible; make it so before invoking. 
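The hunk above has ConnectionFactory resolve the connection class named by HConnection.HBASE_CLIENT_CONNECTION_IMPL (defaulting to ConnectionImplementation) and invoke its non-public constructor reflectively. A minimal, self-contained sketch of that reflective-construction pattern follows; the demo class, interface and argument are hypothetical stand-ins, not part of the patch.

import java.lang.reflect.Constructor;

public class ReflectiveFactoryDemo {
  interface Service { String name(); }

  static class DefaultService implements Service {
    private final String label;
    private DefaultService(String label) { this.label = label; } // non-public, like the real constructor
    @Override public String name() { return label; }
  }

  // Resolve the class by name, open up its non-public constructor, and invoke it.
  static Service create(String className, String label) throws Exception {
    Class<?> clazz = Class.forName(className);
    Constructor<?> ctor = clazz.getDeclaredConstructor(String.class);
    ctor.setAccessible(true);
    return (Service) ctor.newInstance(label);
  }

  public static void main(String[] args) throws Exception {
    System.out.println(create(DefaultService.class.getName(), "demo").name());
  }
}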
Constructor constructor = clazz.getDeclaredConstructor(Configuration.class, - boolean.class, ExecutorService.class, User.class); + ExecutorService.class, User.class); constructor.setAccessible(true); - return (Connection) constructor.newInstance(conf, managed, pool, user); + return (Connection) constructor.newInstance(conf, pool, user); } catch (Exception e) { throw new IOException(e); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java new file mode 100644 index 00000000000..a51a4ac66fe --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -0,0 +1,2248 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.BlockingRpcChannel; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MasterNotRunningException; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.RegionTooBusyException; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotEnabledException; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy; +import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory; +import org.apache.hadoop.hbase.client.coprocessor.Batch; +import org.apache.hadoop.hbase.exceptions.RegionMovedException; +import org.apache.hadoop.hbase.exceptions.RegionOpeningException; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcClientFactory; +import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos; +import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse; +import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.ExceptionUtil; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.zookeeper.KeeperException; + +import javax.annotation.Nullable; +import java.io.Closeable; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.lang.reflect.UndeclaredThrowableException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Main implementation of {@link Connection} and {@link ClusterConnection} interfaces. + * Encapsulates connection to zookeeper and regionservers. + */ +@edu.umd.cs.findbugs.annotations.SuppressWarnings( + value="AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION", + justification="Access to the conncurrent hash map is under a lock so should be fine.") +@InterfaceAudience.Private +class ConnectionImplementation implements ClusterConnection, Closeable { + public static final String RETRIES_BY_SERVER_KEY = "hbase.client.retries.by.server"; + static final Log LOG = LogFactory.getLog(ConnectionImplementation.class); + private static final String CLIENT_NONCES_ENABLED_KEY = "hbase.client.nonces.enabled"; + + private final long pause; + private final boolean useMetaReplicas; + private final int numTries; + final int rpcTimeout; + + /** + * Global nonceGenerator shared per client.Currently there's no reason to limit its scope. + * Once it's set under nonceGeneratorCreateLock, it is never unset or changed. + */ + private static volatile NonceGenerator nonceGenerator = null; + /** The nonce generator lock. Only taken when creating HConnection, which gets a private copy. */ + private static Object nonceGeneratorCreateLock = new Object(); + + private final AsyncProcess asyncProcess; + // single tracker per connection + private final ServerStatisticTracker stats; + + private volatile boolean closed; + private volatile boolean aborted; + + // package protected for the tests + ClusterStatusListener clusterStatusListener; + + + private final Object metaRegionLock = new Object(); + + // We have a single lock for master & zk to prevent deadlocks. Having + // one lock for ZK and one lock for master is not possible: + // When creating a connection to master, we need a connection to ZK to get + // its address. But another thread could have taken the ZK lock, and could + // be waiting for the master lock => deadlock. 
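The comment above is why the class keeps a single monitor (masterAndZKLock) for both master and ZooKeeper state: with one lock there is never a second monitor to take in the opposite order, so the A-then-B / B-then-A deadlock it describes cannot occur. A small illustrative sketch of the idea, with hypothetical field and method names, follows.

public class SingleLockDemo {
  // One monitor guards both the "ZooKeeper" and the "master" state, so a thread
  // that needs both never has to acquire two locks in some order.
  private final Object masterAndZkLock = new Object();
  private String masterAddress; // normally looked up in ZooKeeper
  private String masterStub;    // built once the address is known

  String connectMaster() {
    synchronized (masterAndZkLock) {
      if (masterAddress == null) {
        masterAddress = "zk-resolved-host:16000"; // stand-in for the ZK lookup
      }
      if (masterStub == null) {
        masterStub = "MasterService@" + masterAddress;
      }
      return masterStub;
    }
  }
}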
+ private final Object masterAndZKLock = new Object(); + + private long keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE; + + // thread executor shared by all HTableInterface instances created + // by this connection + private volatile ExecutorService batchPool = null; + // meta thread executor shared by all HTableInterface instances created + // by this connection + private volatile ExecutorService metaLookupPool = null; + private volatile boolean cleanupPool = false; + + private final Configuration conf; + + // cache the configuration value for tables so that we can avoid calling + // the expensive Configuration to fetch the value multiple times. + private final TableConfiguration tableConfig; + + // Client rpc instance. + private RpcClient rpcClient; + + private MetaCache metaCache = new MetaCache(); + + private int refCount; + + private User user; + + private RpcRetryingCallerFactory rpcCallerFactory; + + private RpcControllerFactory rpcControllerFactory; + + private final RetryingCallerInterceptor interceptor; + + /** + * Cluster registry of basic info such as clusterid and meta region location. + */ + Registry registry; + + private final ClientBackoffPolicy backoffPolicy; + + /** + * constructor + * @param conf Configuration object + */ + ConnectionImplementation(Configuration conf, + ExecutorService pool, User user) throws IOException { + this(conf); + this.user = user; + this.batchPool = pool; + this.registry = setupRegistry(); + retrieveClusterId(); + + this.rpcClient = RpcClientFactory.createClient(this.conf, this.clusterId); + this.rpcControllerFactory = RpcControllerFactory.instantiate(conf); + + // Do we publish the status? + boolean shouldListen = conf.getBoolean(HConstants.STATUS_PUBLISHED, + HConstants.STATUS_PUBLISHED_DEFAULT); + Class listenerClass = + conf.getClass(ClusterStatusListener.STATUS_LISTENER_CLASS, + ClusterStatusListener.DEFAULT_STATUS_LISTENER_CLASS, + ClusterStatusListener.Listener.class); + if (shouldListen) { + if (listenerClass == null) { + LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " + + ClusterStatusListener.STATUS_LISTENER_CLASS + " is not set - not listening status"); + } else { + clusterStatusListener = new ClusterStatusListener( + new ClusterStatusListener.DeadServerHandler() { + @Override + public void newDead(ServerName sn) { + clearCaches(sn); + rpcClient.cancelConnections(sn); + } + }, conf, listenerClass); + } + } + } + + /** + * For tests. 
+ */ + protected ConnectionImplementation(Configuration conf) { + this.conf = conf; + this.tableConfig = new TableConfiguration(conf); + this.closed = false; + this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, + HConstants.DEFAULT_HBASE_CLIENT_PAUSE); + this.useMetaReplicas = conf.getBoolean(HConstants.USE_META_REPLICAS, + HConstants.DEFAULT_USE_META_REPLICAS); + this.numTries = tableConfig.getRetriesNumber(); + this.rpcTimeout = conf.getInt( + HConstants.HBASE_RPC_TIMEOUT_KEY, + HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + if (conf.getBoolean(CLIENT_NONCES_ENABLED_KEY, true)) { + synchronized (nonceGeneratorCreateLock) { + if (nonceGenerator == null) { + nonceGenerator = new PerClientRandomNonceGenerator(); + } + } + } else { + nonceGenerator = new NoNonceGenerator(); + } + stats = ServerStatisticTracker.create(conf); + this.asyncProcess = createAsyncProcess(this.conf); + this.interceptor = (new RetryingCallerInterceptorFactory(conf)).build(); + this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(conf, interceptor, this.stats); + this.backoffPolicy = ClientBackoffPolicyFactory.create(conf); + } + + /** + * @param conn The connection for which to replace the generator. + * @param cnm Replaces the nonce generator used, for testing. + * @return old nonce generator. + */ + @VisibleForTesting + static NonceGenerator injectNonceGeneratorForTesting( + ClusterConnection conn, NonceGenerator cnm) { + ConnectionImplementation connImpl = (ConnectionImplementation)conn; + NonceGenerator ng = connImpl.getNonceGenerator(); + LOG.warn("Nonce generator is being replaced by test code for " + + cnm.getClass().getName()); + nonceGenerator = cnm; + return ng; + } + + /** + * Look for an exception we know in the remote exception: + * - hadoop.ipc wrapped exceptions + * - nested exceptions + * + * Looks for: RegionMovedException / RegionOpeningException / RegionTooBusyException + * @return null if we didn't find the exception, the exception otherwise. + */ + public static Throwable findException(Object exception) { + if (exception == null || !(exception instanceof Throwable)) { + return null; + } + Throwable cur = (Throwable) exception; + while (cur != null) { + if (cur instanceof RegionMovedException || cur instanceof RegionOpeningException + || cur instanceof RegionTooBusyException) { + return cur; + } + if (cur instanceof RemoteException) { + RemoteException re = (RemoteException) cur; + cur = re.unwrapRemoteException( + RegionOpeningException.class, RegionMovedException.class, + RegionTooBusyException.class); + if (cur == null) { + cur = re.unwrapRemoteException(); + } + // unwrapRemoteException can return the exception given as a parameter when it cannot + // unwrap it. 
In this case, there is no need to look further + // noinspection ObjectEquality + if (cur == re) { + return null; + } + } else { + cur = cur.getCause(); + } + } + + return null; + } + + @Override + public HTableInterface getTable(String tableName) throws IOException { + return getTable(TableName.valueOf(tableName)); + } + + @Override + public HTableInterface getTable(byte[] tableName) throws IOException { + return getTable(TableName.valueOf(tableName)); + } + + @Override + public HTableInterface getTable(TableName tableName) throws IOException { + return getTable(tableName, getBatchPool()); + } + + @Override + public HTableInterface getTable(String tableName, ExecutorService pool) throws IOException { + return getTable(TableName.valueOf(tableName), pool); + } + + @Override + public HTableInterface getTable(byte[] tableName, ExecutorService pool) throws IOException { + return getTable(TableName.valueOf(tableName), pool); + } + + @Override + public HTableInterface getTable(TableName tableName, ExecutorService pool) throws IOException { + return new HTable(tableName, this, tableConfig, rpcCallerFactory, rpcControllerFactory, pool); + } + + @Override + public BufferedMutator getBufferedMutator(BufferedMutatorParams params) { + if (params.getTableName() == null) { + throw new IllegalArgumentException("TableName cannot be null."); + } + if (params.getPool() == null) { + params.pool(HTable.getDefaultExecutor(getConfiguration())); + } + if (params.getWriteBufferSize() == BufferedMutatorParams.UNSET) { + params.writeBufferSize(tableConfig.getWriteBufferSize()); + } + if (params.getMaxKeyValueSize() == BufferedMutatorParams.UNSET) { + params.maxKeyValueSize(tableConfig.getMaxKeyValueSize()); + } + return new BufferedMutatorImpl(this, rpcCallerFactory, rpcControllerFactory, params); + } + + @Override + public BufferedMutator getBufferedMutator(TableName tableName) { + return getBufferedMutator(new BufferedMutatorParams(tableName)); + } + + @Override + public RegionLocator getRegionLocator(TableName tableName) throws IOException { + return new HRegionLocator(tableName, this); + } + + @Override + public Admin getAdmin() throws IOException { + return new HBaseAdmin(this); + } + + private ExecutorService getBatchPool() { + if (batchPool == null) { + synchronized (this) { + if (batchPool == null) { + this.batchPool = getThreadPool(conf.getInt("hbase.hconnection.threads.max", 256), + conf.getInt("hbase.hconnection.threads.core", 256), "-shared-", null); + this.cleanupPool = true; + } + } + } + return this.batchPool; + } + + private ExecutorService getThreadPool(int maxThreads, int coreThreads, String nameHint, + BlockingQueue passedWorkQueue) { + // shared HTable thread executor not yet initialized + if (maxThreads == 0) { + maxThreads = Runtime.getRuntime().availableProcessors() * 8; + } + if (coreThreads == 0) { + coreThreads = Runtime.getRuntime().availableProcessors() * 8; + } + long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60); + BlockingQueue workQueue = passedWorkQueue; + if (workQueue == null) { + workQueue = + new LinkedBlockingQueue(maxThreads * + conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, + HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS)); + } + ThreadPoolExecutor tpe = new ThreadPoolExecutor( + coreThreads, + maxThreads, + keepAliveTime, + TimeUnit.SECONDS, + workQueue, + Threads.newDaemonThreadFactory(toString() + nameHint)); + tpe.allowCoreThreadTimeOut(true); + return tpe; + } + + private ExecutorService getMetaLookupPool() { + if 
(this.metaLookupPool == null) { + synchronized (this) { + if (this.metaLookupPool == null) { + //Some of the threads would be used for meta replicas + //To start with, threads.max.core threads can hit the meta (including replicas). + //After that, requests will get queued up in the passed queue, and only after + //the queue is full, a new thread will be started + this.metaLookupPool = getThreadPool( + conf.getInt("hbase.hconnection.meta.lookup.threads.max", 128), + conf.getInt("hbase.hconnection.meta.lookup.threads.max.core", 10), + "-metaLookup-shared-", new LinkedBlockingQueue()); + } + } + } + return this.metaLookupPool; + } + + protected ExecutorService getCurrentMetaLookupPool() { + return metaLookupPool; + } + + protected ExecutorService getCurrentBatchPool() { + return batchPool; + } + + private void shutdownPools() { + if (this.cleanupPool && this.batchPool != null && !this.batchPool.isShutdown()) { + shutdownBatchPool(this.batchPool); + } + if (this.metaLookupPool != null && !this.metaLookupPool.isShutdown()) { + shutdownBatchPool(this.metaLookupPool); + } + } + + private void shutdownBatchPool(ExecutorService pool) { + pool.shutdown(); + try { + if (!pool.awaitTermination(10, TimeUnit.SECONDS)) { + pool.shutdownNow(); + } + } catch (InterruptedException e) { + pool.shutdownNow(); + } + } + + /** + * @return The cluster registry implementation to use. + * @throws java.io.IOException + */ + private Registry setupRegistry() throws IOException { + return RegistryFactory.getRegistry(this); + } + + /** + * For tests only. + */ + @VisibleForTesting + RpcClient getRpcClient() { + return rpcClient; + } + + /** + * An identifier that will remain the same for a given connection. + */ + @Override + public String toString(){ + return "hconnection-0x" + Integer.toHexString(hashCode()); + } + + protected String clusterId = null; + + void retrieveClusterId() { + if (clusterId != null) return; + this.clusterId = this.registry.getClusterId(); + if (clusterId == null) { + clusterId = HConstants.CLUSTER_ID_DEFAULT; + LOG.debug("clusterid came back null, using default " + clusterId); + } + } + + @Override + public Configuration getConfiguration() { + return this.conf; + } + + private void checkIfBaseNodeAvailable(ZooKeeperWatcher zkw) + throws MasterNotRunningException { + String errorMsg; + try { + if (ZKUtil.checkExists(zkw, zkw.baseZNode) == -1) { + errorMsg = "The node " + zkw.baseZNode+" is not in ZooKeeper. " + + "It should have been written by the master. " + + "Check the value configured in 'zookeeper.znode.parent'. 
" + + "There could be a mismatch with the one configured in the master."; + LOG.error(errorMsg); + throw new MasterNotRunningException(errorMsg); + } + } catch (KeeperException e) { + errorMsg = "Can't get connection to ZooKeeper: " + e.getMessage(); + LOG.error(errorMsg); + throw new MasterNotRunningException(errorMsg, e); + } + } + + /** + * @return true if the master is running, throws an exception otherwise + * @throws org.apache.hadoop.hbase.MasterNotRunningException - if the master is not running + * @throws org.apache.hadoop.hbase.ZooKeeperConnectionException + * @deprecated this has been deprecated without a replacement + */ + @Deprecated + @Override + public boolean isMasterRunning() + throws MasterNotRunningException, ZooKeeperConnectionException { + // When getting the master connection, we check it's running, + // so if there is no exception, it means we've been able to get a + // connection on a running master + MasterKeepAliveConnection m = getKeepAliveMasterService(); + m.close(); + return true; + } + + @Override + public HRegionLocation getRegionLocation(final TableName tableName, + final byte [] row, boolean reload) + throws IOException { + return reload? relocateRegion(tableName, row): locateRegion(tableName, row); + } + + @Override + public HRegionLocation getRegionLocation(final byte[] tableName, + final byte [] row, boolean reload) + throws IOException { + return getRegionLocation(TableName.valueOf(tableName), row, reload); + } + + @Override + public boolean isTableEnabled(TableName tableName) throws IOException { + return getTableState(tableName).inStates(TableState.State.ENABLED); + } + + @Override + public boolean isTableEnabled(byte[] tableName) throws IOException { + return isTableEnabled(TableName.valueOf(tableName)); + } + + @Override + public boolean isTableDisabled(TableName tableName) throws IOException { + return getTableState(tableName).inStates(TableState.State.DISABLED); + } + + @Override + public boolean isTableDisabled(byte[] tableName) throws IOException { + return isTableDisabled(TableName.valueOf(tableName)); + } + + @Override + public boolean isTableAvailable(final TableName tableName) throws IOException { + return isTableAvailable(tableName, null); + } + + @Override + public boolean isTableAvailable(final byte[] tableName) throws IOException { + return isTableAvailable(TableName.valueOf(tableName)); + } + + @Override + public boolean isTableAvailable(final TableName tableName, @Nullable final byte[][] splitKeys) + throws IOException { + if (this.closed) throw new IOException(toString() + " closed"); + try { + if (!isTableEnabled(tableName)) { + LOG.debug("Table " + tableName + " not enabled"); + return false; + } + List> locations = + MetaTableAccessor.getTableRegionsAndLocations(this, tableName, true); + + int notDeployed = 0; + int regionCount = 0; + for (Pair pair : locations) { + HRegionInfo info = pair.getFirst(); + if (pair.getSecond() == null) { + if (LOG.isDebugEnabled()) { + LOG.debug("Table " + tableName + " has not deployed region " + pair.getFirst() + .getEncodedName()); + } + notDeployed++; + } else if (splitKeys != null + && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) { + for (byte[] splitKey : splitKeys) { + // Just check if the splitkey is available + if (Bytes.equals(info.getStartKey(), splitKey)) { + regionCount++; + break; + } + } + } else { + // Always empty start row should be counted + regionCount++; + } + } + if (notDeployed > 0) { + if (LOG.isDebugEnabled()) { + LOG.debug("Table " + tableName + " has " + 
notDeployed + " regions"); + } + return false; + } else if (splitKeys != null && regionCount != splitKeys.length + 1) { + if (LOG.isDebugEnabled()) { + LOG.debug("Table " + tableName + " expected to have " + (splitKeys.length + 1) + + " regions, but only " + regionCount + " available"); + } + return false; + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Table " + tableName + " should be available"); + } + return true; + } + } catch (TableNotFoundException tnfe) { + LOG.warn("Table " + tableName + " not enabled, it is not exists"); + return false; + } + } + + @Override + public boolean isTableAvailable(final byte[] tableName, final byte[][] splitKeys) + throws IOException { + return isTableAvailable(TableName.valueOf(tableName), splitKeys); + } + + @Override + public HRegionLocation locateRegion(final byte[] regionName) throws IOException { + RegionLocations locations = locateRegion(HRegionInfo.getTable(regionName), + HRegionInfo.getStartKey(regionName), false, true); + return locations == null ? null : locations.getRegionLocation(); + } + + @Override + public boolean isDeadServer(ServerName sn) { + if (clusterStatusListener == null) { + return false; + } else { + return clusterStatusListener.isDeadServer(sn); + } + } + + @Override + public List locateRegions(final TableName tableName) + throws IOException { + return locateRegions(tableName, false, true); + } + + @Override + public List locateRegions(final byte[] tableName) + throws IOException { + return locateRegions(TableName.valueOf(tableName)); + } + + @Override + public List locateRegions(final TableName tableName, + final boolean useCache, final boolean offlined) throws IOException { + List regions = MetaTableAccessor + .getTableRegions(this, tableName, !offlined); + final List locations = new ArrayList(); + for (HRegionInfo regionInfo : regions) { + RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true); + if (list != null) { + for (HRegionLocation loc : list.getRegionLocations()) { + if (loc != null) { + locations.add(loc); + } + } + } + } + return locations; + } + + @Override + public List locateRegions(final byte[] tableName, + final boolean useCache, final boolean offlined) throws IOException { + return locateRegions(TableName.valueOf(tableName), useCache, offlined); + } + + @Override + public HRegionLocation locateRegion( + final TableName tableName, final byte[] row) throws IOException{ + RegionLocations locations = locateRegion(tableName, row, true, true); + return locations == null ? null : locations.getRegionLocation(); + } + + @Override + public HRegionLocation locateRegion(final byte[] tableName, + final byte [] row) + throws IOException{ + return locateRegion(TableName.valueOf(tableName), row); + } + + @Override + public HRegionLocation relocateRegion(final TableName tableName, + final byte [] row) throws IOException{ + RegionLocations locations = relocateRegion(tableName, row, + RegionReplicaUtil.DEFAULT_REPLICA_ID); + return locations == null ? null : + locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID); + } + + @Override + public RegionLocations relocateRegion(final TableName tableName, + final byte [] row, int replicaId) throws IOException{ + // Since this is an explicit request not to use any caching, finding + // disabled tables should not be desirable. This will ensure that an exception is thrown when + // the first time a disabled table is interacted with. 
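The check that follows enforces what the comment above (and the new "Since 0.98.1" javadoc in Connection) describe: getTable() stays cheap, and a disabled or missing table only surfaces once the first operation actually reaches the cluster. A hedged caller-side sketch is below; the table name is hypothetical, and the exact exception the caller sees depends on which code path fails first.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LazyTableCheckDemo {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("example_table"))) {
      // getTable() above did not validate the table; the Get below is the first
      // point where a disabled or missing table can produce an exception.
      table.get(new Get(Bytes.toBytes("row1")));
    } catch (IOException e) {
      // e.g. TableNotFoundException or TableNotEnabledException, depending on the failure
      e.printStackTrace();
    }
  }
}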
+ if (!tableName.equals(TableName.META_TABLE_NAME) && isTableDisabled(tableName)) { + throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled."); + } + + return locateRegion(tableName, row, false, true, replicaId); + } + + @Override + public HRegionLocation relocateRegion(final byte[] tableName, + final byte [] row) throws IOException { + return relocateRegion(TableName.valueOf(tableName), row); + } + + @Override + public RegionLocations locateRegion(final TableName tableName, + final byte [] row, boolean useCache, boolean retry) + throws IOException { + return locateRegion(tableName, row, useCache, retry, RegionReplicaUtil.DEFAULT_REPLICA_ID); + } + + @Override + public RegionLocations locateRegion(final TableName tableName, + final byte [] row, boolean useCache, boolean retry, int replicaId) + throws IOException { + if (this.closed) throw new IOException(toString() + " closed"); + if (tableName== null || tableName.getName().length == 0) { + throw new IllegalArgumentException( + "table name cannot be null or zero length"); + } + if (tableName.equals(TableName.META_TABLE_NAME)) { + return locateMeta(tableName, useCache, replicaId); + } else { + // Region not in the cache - have to go to the meta RS + return locateRegionInMeta(tableName, row, useCache, retry, replicaId); + } + } + + private RegionLocations locateMeta(final TableName tableName, + boolean useCache, int replicaId) throws IOException { + // HBASE-10785: We cache the location of the META itself, so that we are not overloading + // zookeeper with one request for every region lookup. We cache the META with empty row + // key in MetaCache. + byte[] metaCacheKey = HConstants.EMPTY_START_ROW; // use byte[0] as the row for meta + RegionLocations locations = null; + if (useCache) { + locations = getCachedLocation(tableName, metaCacheKey); + if (locations != null && locations.getRegionLocation(replicaId) != null) { + return locations; + } + } + + // only one thread should do the lookup. + synchronized (metaRegionLock) { + // Check the cache again for a hit in case some other thread made the + // same query while we were waiting on the lock. + if (useCache) { + locations = getCachedLocation(tableName, metaCacheKey); + if (locations != null && locations.getRegionLocation(replicaId) != null) { + return locations; + } + } + + // Look up from zookeeper + locations = this.registry.getMetaRegionLocation(); + if (locations != null) { + cacheLocation(tableName, locations); + } + } + return locations; + } + + /* + * Search the hbase:meta table for the HRegionLocation + * info that contains the table and row we're seeking. + */ + private RegionLocations locateRegionInMeta(TableName tableName, byte[] row, + boolean useCache, boolean retry, int replicaId) throws IOException { + + // If we are supposed to be using the cache, look in the cache to see if + // we already have the region. + if (useCache) { + RegionLocations locations = getCachedLocation(tableName, row); + if (locations != null && locations.getRegionLocation(replicaId) != null) { + return locations; + } + } + + // build the key of the meta region we should be looking for. + // the extra 9's on the end are necessary to allow "exact" matches + // without knowing the precise region names. 
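To make the "extra 9's" comment above concrete, here is a stand-alone sketch (strings instead of the real byte[] keys, with made-up region ids) of how the probe key built with HConstants.NINES sorts between hbase:meta row keys, which is what lets the reversed small scan started at that key land on the region containing the row.

public class MetaProbeKeyDemo {
  public static void main(String[] args) {
    // hbase:meta row keys look roughly like "<table>,<startKey>,<regionId>...".
    String regionA = "t1,,1400000000000";        // region whose start key is "" (empty)
    String regionB = "t1,row-50,1400000000001";  // region whose start key is "row-50"

    // Probe key for row "row-42": appending the nines makes it sort after any
    // real region id for that start key, but before the next region's name.
    String probe = "t1,row-42,99999999999999";

    System.out.println(probe.compareTo(regionA) > 0); // true: probe sorts after regionA
    System.out.println(probe.compareTo(regionB) < 0); // true: probe sorts before regionB
    // A reverse scan starting at 'probe' therefore returns regionA first,
    // i.e. the region that actually contains "row-42".
  }
}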
+ byte[] metaKey = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false); + + Scan s = new Scan(); + s.setReversed(true); + s.setStartRow(metaKey); + s.setSmall(true); + s.setCaching(1); + if (this.useMetaReplicas) { + s.setConsistency(Consistency.TIMELINE); + } + + int localNumRetries = (retry ? numTries : 1); + + for (int tries = 0; true; tries++) { + if (tries >= localNumRetries) { + throw new NoServerForRegionException("Unable to find region for " + + Bytes.toStringBinary(row) + " in " + tableName + + " after " + localNumRetries + " tries."); + } + if (useCache) { + RegionLocations locations = getCachedLocation(tableName, row); + if (locations != null && locations.getRegionLocation(replicaId) != null) { + return locations; + } + } else { + // If we are not supposed to be using the cache, delete any existing cached location + // so it won't interfere. + metaCache.clearCache(tableName, row); + } + + // Query the meta region + try { + Result regionInfoRow = null; + ReversedClientScanner rcs = null; + try { + rcs = new ClientSmallReversedScanner(conf, s, TableName.META_TABLE_NAME, this, + rpcCallerFactory, rpcControllerFactory, getMetaLookupPool(), 0); + regionInfoRow = rcs.next(); + } finally { + if (rcs != null) { + rcs.close(); + } + } + + if (regionInfoRow == null) { + throw new TableNotFoundException(tableName); + } + + // convert the row result into the HRegionLocation we need! + RegionLocations locations = MetaTableAccessor.getRegionLocations(regionInfoRow); + if (locations == null || locations.getRegionLocation(replicaId) == null) { + throw new IOException("HRegionInfo was null in " + + tableName + ", row=" + regionInfoRow); + } + HRegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegionInfo(); + if (regionInfo == null) { + throw new IOException("HRegionInfo was null or empty in " + + TableName.META_TABLE_NAME + ", row=" + regionInfoRow); + } + + // possible we got a region of a different table... + if (!regionInfo.getTable().equals(tableName)) { + throw new TableNotFoundException( + "Table '" + tableName + "' was not found, got: " + + regionInfo.getTable() + "."); + } + if (regionInfo.isSplit()) { + throw new RegionOfflineException("the only available region for" + + " the required row is a split parent," + + " the daughters should be online soon: " + + regionInfo.getRegionNameAsString()); + } + if (regionInfo.isOffline()) { + throw new RegionOfflineException("the region is offline, could" + + " be caused by a disable table call: " + + regionInfo.getRegionNameAsString()); + } + + ServerName serverName = locations.getRegionLocation(replicaId).getServerName(); + if (serverName == null) { + throw new NoServerForRegionException("No server address listed " + + "in " + TableName.META_TABLE_NAME + " for region " + + regionInfo.getRegionNameAsString() + " containing row " + + Bytes.toStringBinary(row)); + } + + if (isDeadServer(serverName)){ + throw new RegionServerStoppedException("hbase:meta says the region "+ + regionInfo.getRegionNameAsString()+" is managed by the server " + serverName + + ", but it is dead."); + } + // Instantiate the location + cacheLocation(tableName, locations); + return locations; + } catch (TableNotFoundException e) { + // if we got this error, probably means the table just plain doesn't + // exist. rethrow the error immediately. this should always be coming + // from the HTable constructor. 
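The comment above marks TableNotFoundException as non-retriable, while other IOExceptions fall through to the pause-and-retry branch further down. The generic shape of that loop is sketched below with assumed names; the backoff line is a simplification, not the patch's ConnectionUtils.getPauseTime.

import java.util.concurrent.Callable;

public class RetryLoopDemo {
  // Fatal errors are rethrown at once; retriable ones back off and try again.
  static <T> T callWithRetries(Callable<T> op, int maxTries, long basePauseMs) throws Exception {
    for (int tries = 0; ; tries++) {
      try {
        return op.call();
      } catch (IllegalArgumentException fatal) {   // stand-in for TableNotFoundException
        throw fatal;                               // pointless to retry; fail immediately
      } catch (Exception retriable) {
        if (tries >= maxTries - 1) {
          throw retriable;
        }
        // Roughly exponential pause between attempts.
        Thread.sleep(basePauseMs * (1L << Math.min(tries, 6)));
      }
    }
  }

  public static void main(String[] args) throws Exception {
    int[] attempts = {0};
    String result = callWithRetries(() -> {
      if (attempts[0]++ < 2) {
        throw new RuntimeException("transient failure");
      }
      return "ok after " + attempts[0] + " attempts";
    }, 5, 10);
    System.out.println(result);
  }
}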
+ throw e; + } catch (IOException e) { + ExceptionUtil.rethrowIfInterrupt(e); + + if (e instanceof RemoteException) { + e = ((RemoteException)e).unwrapRemoteException(); + } + if (tries < localNumRetries - 1) { + if (LOG.isDebugEnabled()) { + LOG.debug("locateRegionInMeta parentTable=" + + TableName.META_TABLE_NAME + ", metaLocation=" + + ", attempt=" + tries + " of " + + localNumRetries + " failed; retrying after sleep of " + + ConnectionUtils.getPauseTime(this.pause, tries) + " because: " + e.getMessage()); + } + } else { + throw e; + } + // Only relocate the parent region if necessary + if(!(e instanceof RegionOfflineException || + e instanceof NoServerForRegionException)) { + relocateRegion(TableName.META_TABLE_NAME, metaKey, replicaId); + } + } + try{ + Thread.sleep(ConnectionUtils.getPauseTime(this.pause, tries)); + } catch (InterruptedException e) { + throw new InterruptedIOException("Giving up trying to location region in " + + "meta: thread is interrupted."); + } + } + } + + /** + * Put a newly discovered HRegionLocation into the cache. + * @param tableName The table name. + * @param location the new location + */ + private void cacheLocation(final TableName tableName, final RegionLocations location) { + metaCache.cacheLocation(tableName, location); + } + + /** + * Search the cache for a location that fits our table and row key. + * Return null if no suitable region is located. + * + * @param tableName + * @param row + * @return Null or region location found in cache. + */ + RegionLocations getCachedLocation(final TableName tableName, + final byte [] row) { + return metaCache.getCachedLocation(tableName, row); + } + + public void clearRegionCache(final TableName tableName, byte[] row) { + metaCache.clearCache(tableName, row); + } + + /* + * Delete all cached entries of a table that maps to a specific location. + */ + @Override + public void clearCaches(final ServerName serverName) { + metaCache.clearCache(serverName); + } + + @Override + public void clearRegionCache() { + metaCache.clearCache(); + } + + @Override + public void clearRegionCache(final TableName tableName) { + metaCache.clearCache(tableName); + } + + @Override + public void clearRegionCache(final byte[] tableName) { + clearRegionCache(TableName.valueOf(tableName)); + } + + /** + * Put a newly discovered HRegionLocation into the cache. + * @param tableName The table name. + * @param source the source of the new location, if it's not coming from meta + * @param location the new location + */ + private void cacheLocation(final TableName tableName, final ServerName source, + final HRegionLocation location) { + metaCache.cacheLocation(tableName, source, location); + } + + // Map keyed by service name + regionserver to service stub implementation + private final ConcurrentHashMap stubs = + new ConcurrentHashMap(); + // Map of locks used creating service stubs per regionserver. + private final ConcurrentHashMap connectionLock = + new ConcurrentHashMap(); + + /** + * State of the MasterService connection/setup. 
+ */ + static class MasterServiceState { + HConnection connection; + MasterProtos.MasterService.BlockingInterface stub; + int userCount; + + MasterServiceState(final HConnection connection) { + super(); + this.connection = connection; + } + + @Override + public String toString() { + return "MasterService"; + } + + Object getStub() { + return this.stub; + } + + void clearStub() { + this.stub = null; + } + + boolean isMasterRunning() throws ServiceException { + MasterProtos.IsMasterRunningResponse response = + this.stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest()); + return response != null? response.getIsMasterRunning(): false; + } + } + + /** Dummy nonce generator for disabled nonces. */ + static class NoNonceGenerator implements NonceGenerator { + @Override + public long getNonceGroup() { + return HConstants.NO_NONCE; + } + @Override + public long newNonce() { + return HConstants.NO_NONCE; + } + } + + /** + * The record of errors for servers. + */ + static class ServerErrorTracker { + // We need a concurrent map here, as we could have multiple threads updating it in parallel. + private final ConcurrentMap errorsByServer = + new ConcurrentHashMap(); + private final long canRetryUntil; + private final int maxRetries; + private final long startTrackingTime; + + public ServerErrorTracker(long timeout, int maxRetries) { + this.maxRetries = maxRetries; + this.canRetryUntil = EnvironmentEdgeManager.currentTime() + timeout; + this.startTrackingTime = new Date().getTime(); + } + + /** + * We stop to retry when we have exhausted BOTH the number of retries and the time allocated. + */ + boolean canRetryMore(int numRetry) { + // If there is a single try we must not take into account the time. + return numRetry < maxRetries || (maxRetries > 1 && + EnvironmentEdgeManager.currentTime() < this.canRetryUntil); + } + + /** + * Calculates the back-off time for a retrying request to a particular server. + * + * @param server The server in question. + * @param basePause The default hci pause. + * @return The time to wait before sending next request. + */ + long calculateBackoffTime(ServerName server, long basePause) { + long result; + ServerErrors errorStats = errorsByServer.get(server); + if (errorStats != null) { + result = ConnectionUtils.getPauseTime(basePause, errorStats.getCount()); + } else { + result = 0; // yes, if the server is not in our list we don't wait before retrying. + } + return result; + } + + /** + * Reports that there was an error on the server to do whatever bean-counting necessary. + * + * @param server The server in question. + */ + void reportServerError(ServerName server) { + ServerErrors errors = errorsByServer.get(server); + if (errors != null) { + errors.addError(); + } else { + errors = errorsByServer.putIfAbsent(server, new ServerErrors()); + if (errors != null){ + errors.addError(); + } + } + } + + long getStartTrackingTime() { + return startTrackingTime; + } + + /** + * The record of errors for a server. + */ + private static class ServerErrors { + private final AtomicInteger retries = new AtomicInteger(0); + + public int getCount() { + return retries.get(); + } + + public void addError() { + retries.incrementAndGet(); + } + } + } + + /** + * Makes a client-side stub for master services. Sub-class to specialize. + * Depends on hosting class so not static. Exists so we avoid duplicating a bunch of code + * when setting up the MasterMonitorService and MasterAdminService. 
+ */ + abstract class StubMaker { + /** + * Returns the name of the service stub being created. + */ + protected abstract String getServiceName(); + + /** + * Make stub and cache it internal so can be used later doing the isMasterRunning call. + * @param channel + */ + protected abstract Object makeStub(final BlockingRpcChannel channel); + + /** + * Once setup, check it works by doing isMasterRunning check. + * @throws com.google.protobuf.ServiceException + */ + protected abstract void isMasterRunning() throws ServiceException; + + /** + * Create a stub. Try once only. It is not typed because there is no common type to + * protobuf services nor their interfaces. Let the caller do appropriate casting. + * @return A stub for master services. + * @throws java.io.IOException + * @throws org.apache.zookeeper.KeeperException + * @throws com.google.protobuf.ServiceException + */ + private Object makeStubNoRetries() throws IOException, KeeperException, ServiceException { + ZooKeeperKeepAliveConnection zkw; + try { + zkw = getKeepAliveZooKeeperWatcher(); + } catch (IOException e) { + ExceptionUtil.rethrowIfInterrupt(e); + throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e); + } + try { + checkIfBaseNodeAvailable(zkw); + ServerName sn = MasterAddressTracker.getMasterAddress(zkw); + if (sn == null) { + String msg = "ZooKeeper available but no active master location found"; + LOG.info(msg); + throw new MasterNotRunningException(msg); + } + if (isDeadServer(sn)) { + throw new MasterNotRunningException(sn + " is dead."); + } + // Use the security info interface name as our stub key + String key = getStubKey(getServiceName(), sn.getHostname(), sn.getPort()); + connectionLock.putIfAbsent(key, key); + Object stub = null; + synchronized (connectionLock.get(key)) { + stub = stubs.get(key); + if (stub == null) { + BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn, user, rpcTimeout); + stub = makeStub(channel); + isMasterRunning(); + stubs.put(key, stub); + } + } + return stub; + } finally { + zkw.close(); + } + } + + /** + * Create a stub against the master. Retry if necessary. + * @return A stub to do intf against the master + * @throws org.apache.hadoop.hbase.MasterNotRunningException + */ + Object makeStub() throws IOException { + // The lock must be at the beginning to prevent multiple master creations + // (and leaks) in a multithread context + synchronized (masterAndZKLock) { + Exception exceptionCaught = null; + if (!closed) { + try { + return makeStubNoRetries(); + } catch (IOException e) { + exceptionCaught = e; + } catch (KeeperException e) { + exceptionCaught = e; + } catch (ServiceException e) { + exceptionCaught = e; + } + + throw new MasterNotRunningException(exceptionCaught); + } else { + throw new DoNotRetryIOException("Connection was closed while trying to get master"); + } + } + } + } + + /** + * Class to make a MasterServiceStubMaker stub. 
+ */ + class MasterServiceStubMaker extends StubMaker { + private MasterProtos.MasterService.BlockingInterface stub; + @Override + protected String getServiceName() { + return MasterProtos.MasterService.getDescriptor().getName(); + } + + @Override + MasterProtos.MasterService.BlockingInterface makeStub() throws IOException { + return (MasterProtos.MasterService.BlockingInterface)super.makeStub(); + } + + @Override + protected Object makeStub(BlockingRpcChannel channel) { + this.stub = MasterProtos.MasterService.newBlockingStub(channel); + return this.stub; + } + + @Override + protected void isMasterRunning() throws ServiceException { + this.stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest()); + } + } + + @Override + public AdminProtos.AdminService.BlockingInterface getAdmin(final ServerName serverName) + throws IOException { + return getAdmin(serverName, false); + } + + @Override + // Nothing is done w/ the 'master' parameter. It is ignored. + public AdminProtos.AdminService.BlockingInterface getAdmin(final ServerName serverName, + final boolean master) + throws IOException { + if (isDeadServer(serverName)) { + throw new RegionServerStoppedException(serverName + " is dead."); + } + String key = getStubKey(AdminProtos.AdminService.BlockingInterface.class.getName(), + serverName.getHostname(), serverName.getPort()); + this.connectionLock.putIfAbsent(key, key); + AdminProtos.AdminService.BlockingInterface stub = null; + synchronized (this.connectionLock.get(key)) { + stub = (AdminProtos.AdminService.BlockingInterface)this.stubs.get(key); + if (stub == null) { + BlockingRpcChannel channel = + this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout); + stub = AdminProtos.AdminService.newBlockingStub(channel); + this.stubs.put(key, stub); + } + } + return stub; + } + + @Override + public ClientProtos.ClientService.BlockingInterface getClient(final ServerName sn) + throws IOException { + if (isDeadServer(sn)) { + throw new RegionServerStoppedException(sn + " is dead."); + } + String key = getStubKey(ClientProtos.ClientService.BlockingInterface.class.getName(), sn.getHostname(), + sn.getPort()); + this.connectionLock.putIfAbsent(key, key); + ClientProtos.ClientService.BlockingInterface stub = null; + synchronized (this.connectionLock.get(key)) { + stub = (ClientProtos.ClientService.BlockingInterface)this.stubs.get(key); + if (stub == null) { + BlockingRpcChannel channel = + this.rpcClient.createBlockingRpcChannel(sn, user, rpcTimeout); + stub = ClientProtos.ClientService.newBlockingStub(channel); + // In old days, after getting stub/proxy, we'd make a call. We are not doing that here. + // Just fail on first actual call rather than in here on setup. + this.stubs.put(key, stub); + } + } + return stub; + } + + static String getStubKey(final String serviceName, final String rsHostname, int port) { + // Sometimes, servers go down and they come back up with the same hostname but a different + // IP address. Force a resolution of the rsHostname by trying to instantiate an + // InetSocketAddress, and this way we will rightfully get a new stubKey. + // Also, include the hostname in the key so as to take care of those cases where the + // DNS name is different but IP address remains the same. 
+    InetAddress i = new InetSocketAddress(rsHostname, port).getAddress();
+    String address = rsHostname;
+    if (i != null) {
+      address = i.getHostAddress() + "-" + rsHostname;
+    }
+    return serviceName + "@" + address + ":" + port;
+  }
+
+  private ZooKeeperKeepAliveConnection keepAliveZookeeper;
+  private AtomicInteger keepAliveZookeeperUserCount = new AtomicInteger(0);
+  private boolean canCloseZKW = true;
+
+  // keepAlive time, in ms. No reason to make it configurable.
+  private static final long keepAlive = 5 * 60 * 1000;
+
+  /**
+   * Retrieve a shared ZooKeeperWatcher. You must close it once you have finished with it.
+   * @return The shared instance. Never returns null.
+   */
+  ZooKeeperKeepAliveConnection getKeepAliveZooKeeperWatcher()
+    throws IOException {
+    synchronized (masterAndZKLock) {
+      if (keepAliveZookeeper == null) {
+        if (this.closed) {
+          throw new IOException(toString() + " closed");
+        }
+        // We don't check that our link to ZooKeeper is still valid
+        // But there is a retry mechanism in the ZooKeeperWatcher itself
+        keepAliveZookeeper = new ZooKeeperKeepAliveConnection(conf, this.toString(), this);
+      }
+      keepAliveZookeeperUserCount.addAndGet(1);
+      keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE;
+      return keepAliveZookeeper;
+    }
+  }
+
+  void releaseZooKeeperWatcher(final ZooKeeperWatcher zkw) {
+    if (zkw == null){
+      return;
+    }
+    if (keepAliveZookeeperUserCount.addAndGet(-1) <= 0) {
+      keepZooKeeperWatcherAliveUntil = System.currentTimeMillis() + keepAlive;
+    }
+  }
+
+  private void closeZooKeeperWatcher() {
+    synchronized (masterAndZKLock) {
+      if (keepAliveZookeeper != null) {
+        LOG.info("Closing zookeeper sessionid=0x" +
+          Long.toHexString(
+            keepAliveZookeeper.getRecoverableZooKeeper().getSessionId()));
+        keepAliveZookeeper.internalClose();
+        keepAliveZookeeper = null;
+      }
+      keepAliveZookeeperUserCount.set(0);
+    }
+  }
+
+  final MasterServiceState masterServiceState = new MasterServiceState(this);
+
+  @Override
+  public MasterProtos.MasterService.BlockingInterface getMaster() throws MasterNotRunningException {
+    return getKeepAliveMasterService();
+  }
+
+  private void resetMasterServiceState(final MasterServiceState mss) {
+    mss.userCount++;
+  }
+
+  @Override
+  public MasterKeepAliveConnection getKeepAliveMasterService()
+      throws MasterNotRunningException {
+    synchronized (masterAndZKLock) {
+      if (!isKeepAliveMasterConnectedAndRunning(this.masterServiceState)) {
+        MasterServiceStubMaker stubMaker = new MasterServiceStubMaker();
+        try {
+          this.masterServiceState.stub = stubMaker.makeStub();
+        } catch (MasterNotRunningException ex) {
+          throw ex;
+        } catch (IOException e) {
+          // rethrow as MasterNotRunningException so that we can keep the method sig
+          throw new MasterNotRunningException(e);
+        }
+      }
+      resetMasterServiceState(this.masterServiceState);
+    }
+    // Ugly delegation just so we can add in a Close method.
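getKeepAliveZooKeeperWatcher() and releaseZooKeeperWatcher() above keep one shared watcher per connection: an AtomicInteger counts users, and releasing the last user only arms a close deadline (keepZooKeeperWatcherAliveUntil) rather than closing immediately. A minimal sketch of that reference-counting idiom follows; the class is invented for illustration and leaves the periodic "close if idle" call to the caller. The anonymous MasterKeepAliveConnection that the comment above introduces continues right after this sketch.

import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative sketch only: keep-alive reference counting for one shared resource.
// release() never closes directly; it only arms a deadline that closeIfIdle() honours.
final class KeepAliveRef<T extends Closeable> {
  private static final long KEEP_ALIVE_MS = 5 * 60 * 1000L;   // mirrors the 5 minute keepAlive above

  private final AtomicInteger users = new AtomicInteger(0);
  private volatile long closeNotBefore = Long.MAX_VALUE;
  private T resource;

  KeepAliveRef(T resource) {
    this.resource = resource;
  }

  synchronized T acquire() throws IOException {
    if (resource == null) {
      throw new IOException("already closed");
    }
    users.incrementAndGet();
    closeNotBefore = Long.MAX_VALUE;        // while somebody holds it, never eligible for close
    return resource;
  }

  void release() {
    if (users.decrementAndGet() <= 0) {
      closeNotBefore = System.currentTimeMillis() + KEEP_ALIVE_MS;
    }
  }

  synchronized void closeIfIdle() throws IOException {   // called by a periodic task
    if (resource != null && System.currentTimeMillis() >= closeNotBefore) {
      resource.close();
      resource = null;
      users.set(0);
    }
  }
}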
+ final MasterProtos.MasterService.BlockingInterface stub = this.masterServiceState.stub; + return new MasterKeepAliveConnection() { + MasterServiceState mss = masterServiceState; + @Override + public MasterProtos.AddColumnResponse addColumn(RpcController controller, MasterProtos.AddColumnRequest request) + throws ServiceException { + return stub.addColumn(controller, request); + } + + @Override + public MasterProtos.DeleteColumnResponse deleteColumn(RpcController controller, + MasterProtos.DeleteColumnRequest request) + throws ServiceException { + return stub.deleteColumn(controller, request); + } + + @Override + public MasterProtos.ModifyColumnResponse modifyColumn(RpcController controller, + MasterProtos.ModifyColumnRequest request) + throws ServiceException { + return stub.modifyColumn(controller, request); + } + + @Override + public MasterProtos.MoveRegionResponse moveRegion(RpcController controller, + MasterProtos.MoveRegionRequest request) throws ServiceException { + return stub.moveRegion(controller, request); + } + + @Override + public MasterProtos.DispatchMergingRegionsResponse dispatchMergingRegions( + RpcController controller, MasterProtos.DispatchMergingRegionsRequest request) + throws ServiceException { + return stub.dispatchMergingRegions(controller, request); + } + + @Override + public MasterProtos.AssignRegionResponse assignRegion(RpcController controller, + MasterProtos.AssignRegionRequest request) throws ServiceException { + return stub.assignRegion(controller, request); + } + + @Override + public MasterProtos.UnassignRegionResponse unassignRegion(RpcController controller, + MasterProtos.UnassignRegionRequest request) throws ServiceException { + return stub.unassignRegion(controller, request); + } + + @Override + public MasterProtos.OfflineRegionResponse offlineRegion(RpcController controller, + MasterProtos.OfflineRegionRequest request) throws ServiceException { + return stub.offlineRegion(controller, request); + } + + @Override + public MasterProtos.DeleteTableResponse deleteTable(RpcController controller, + MasterProtos.DeleteTableRequest request) throws ServiceException { + return stub.deleteTable(controller, request); + } + + @Override + public MasterProtos.TruncateTableResponse truncateTable(RpcController controller, + MasterProtos.TruncateTableRequest request) throws ServiceException { + return stub.truncateTable(controller, request); + } + + @Override + public MasterProtos.EnableTableResponse enableTable(RpcController controller, + MasterProtos.EnableTableRequest request) throws ServiceException { + return stub.enableTable(controller, request); + } + + @Override + public MasterProtos.DisableTableResponse disableTable(RpcController controller, + MasterProtos.DisableTableRequest request) throws ServiceException { + return stub.disableTable(controller, request); + } + + @Override + public MasterProtos.ModifyTableResponse modifyTable(RpcController controller, + MasterProtos.ModifyTableRequest request) throws ServiceException { + return stub.modifyTable(controller, request); + } + + @Override + public MasterProtos.CreateTableResponse createTable(RpcController controller, + MasterProtos.CreateTableRequest request) throws ServiceException { + return stub.createTable(controller, request); + } + + @Override + public MasterProtos.ShutdownResponse shutdown(RpcController controller, + MasterProtos.ShutdownRequest request) throws ServiceException { + return stub.shutdown(controller, request); + } + + @Override + public MasterProtos.StopMasterResponse stopMaster(RpcController 
controller, + MasterProtos.StopMasterRequest request) throws ServiceException { + return stub.stopMaster(controller, request); + } + + @Override + public MasterProtos.BalanceResponse balance(RpcController controller, + MasterProtos.BalanceRequest request) throws ServiceException { + return stub.balance(controller, request); + } + + @Override + public MasterProtos.SetBalancerRunningResponse setBalancerRunning( + RpcController controller, MasterProtos.SetBalancerRunningRequest request) + throws ServiceException { + return stub.setBalancerRunning(controller, request); + } + + @Override + public MasterProtos.RunCatalogScanResponse runCatalogScan(RpcController controller, + MasterProtos.RunCatalogScanRequest request) throws ServiceException { + return stub.runCatalogScan(controller, request); + } + + @Override + public MasterProtos.EnableCatalogJanitorResponse enableCatalogJanitor( + RpcController controller, MasterProtos.EnableCatalogJanitorRequest request) + throws ServiceException { + return stub.enableCatalogJanitor(controller, request); + } + + @Override + public MasterProtos.IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled( + RpcController controller, MasterProtos.IsCatalogJanitorEnabledRequest request) + throws ServiceException { + return stub.isCatalogJanitorEnabled(controller, request); + } + + @Override + public ClientProtos.CoprocessorServiceResponse execMasterService( + RpcController controller, ClientProtos.CoprocessorServiceRequest request) + throws ServiceException { + return stub.execMasterService(controller, request); + } + + @Override + public MasterProtos.SnapshotResponse snapshot(RpcController controller, + MasterProtos.SnapshotRequest request) throws ServiceException { + return stub.snapshot(controller, request); + } + + @Override + public MasterProtos.GetCompletedSnapshotsResponse getCompletedSnapshots( + RpcController controller, MasterProtos.GetCompletedSnapshotsRequest request) + throws ServiceException { + return stub.getCompletedSnapshots(controller, request); + } + + @Override + public MasterProtos.DeleteSnapshotResponse deleteSnapshot(RpcController controller, + MasterProtos.DeleteSnapshotRequest request) throws ServiceException { + return stub.deleteSnapshot(controller, request); + } + + @Override + public MasterProtos.IsSnapshotDoneResponse isSnapshotDone(RpcController controller, + MasterProtos.IsSnapshotDoneRequest request) throws ServiceException { + return stub.isSnapshotDone(controller, request); + } + + @Override + public MasterProtos.RestoreSnapshotResponse restoreSnapshot( + RpcController controller, MasterProtos.RestoreSnapshotRequest request) + throws ServiceException { + return stub.restoreSnapshot(controller, request); + } + + @Override + public MasterProtos.IsRestoreSnapshotDoneResponse isRestoreSnapshotDone( + RpcController controller, MasterProtos.IsRestoreSnapshotDoneRequest request) + throws ServiceException { + return stub.isRestoreSnapshotDone(controller, request); + } + + @Override + public MasterProtos.ExecProcedureResponse execProcedure( + RpcController controller, MasterProtos.ExecProcedureRequest request) + throws ServiceException { + return stub.execProcedure(controller, request); + } + + @Override + public MasterProtos.ExecProcedureResponse execProcedureWithRet( + RpcController controller, MasterProtos.ExecProcedureRequest request) + throws ServiceException { + return stub.execProcedureWithRet(controller, request); + } + + @Override + public MasterProtos.IsProcedureDoneResponse isProcedureDone(RpcController controller, + 
MasterProtos.IsProcedureDoneRequest request) throws ServiceException { + return stub.isProcedureDone(controller, request); + } + + @Override + public MasterProtos.GetProcedureResultResponse getProcedureResult(RpcController controller, + MasterProtos.GetProcedureResultRequest request) throws ServiceException { + return stub.getProcedureResult(controller, request); + } + + @Override + public MasterProtos.IsMasterRunningResponse isMasterRunning( + RpcController controller, MasterProtos.IsMasterRunningRequest request) + throws ServiceException { + return stub.isMasterRunning(controller, request); + } + + @Override + public MasterProtos.ModifyNamespaceResponse modifyNamespace(RpcController controller, + MasterProtos.ModifyNamespaceRequest request) + throws ServiceException { + return stub.modifyNamespace(controller, request); + } + + @Override + public MasterProtos.CreateNamespaceResponse createNamespace( + RpcController controller, MasterProtos.CreateNamespaceRequest request) throws ServiceException { + return stub.createNamespace(controller, request); + } + + @Override + public MasterProtos.DeleteNamespaceResponse deleteNamespace( + RpcController controller, MasterProtos.DeleteNamespaceRequest request) throws ServiceException { + return stub.deleteNamespace(controller, request); + } + + @Override + public MasterProtos.GetNamespaceDescriptorResponse getNamespaceDescriptor(RpcController controller, + MasterProtos.GetNamespaceDescriptorRequest request) throws ServiceException { + return stub.getNamespaceDescriptor(controller, request); + } + + @Override + public MasterProtos.ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController controller, + MasterProtos.ListNamespaceDescriptorsRequest request) throws ServiceException { + return stub.listNamespaceDescriptors(controller, request); + } + + @Override + public MasterProtos.ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace( + RpcController controller, MasterProtos.ListTableDescriptorsByNamespaceRequest request) + throws ServiceException { + return stub.listTableDescriptorsByNamespace(controller, request); + } + + @Override + public MasterProtos.ListTableNamesByNamespaceResponse listTableNamesByNamespace( + RpcController controller, MasterProtos.ListTableNamesByNamespaceRequest request) + throws ServiceException { + return stub.listTableNamesByNamespace(controller, request); + } + + @Override + public MasterProtos.GetTableStateResponse getTableState( + RpcController controller, MasterProtos.GetTableStateRequest request) + throws ServiceException { + return stub.getTableState(controller, request); + } + + @Override + public void close() { + release(this.mss); + } + + @Override + public MasterProtos.GetSchemaAlterStatusResponse getSchemaAlterStatus( + RpcController controller, MasterProtos.GetSchemaAlterStatusRequest request) + throws ServiceException { + return stub.getSchemaAlterStatus(controller, request); + } + + @Override + public MasterProtos.GetTableDescriptorsResponse getTableDescriptors( + RpcController controller, MasterProtos.GetTableDescriptorsRequest request) + throws ServiceException { + return stub.getTableDescriptors(controller, request); + } + + @Override + public MasterProtos.GetTableNamesResponse getTableNames( + RpcController controller, MasterProtos.GetTableNamesRequest request) + throws ServiceException { + return stub.getTableNames(controller, request); + } + + @Override + public MasterProtos.GetClusterStatusResponse getClusterStatus( + RpcController controller, 
MasterProtos.GetClusterStatusRequest request) + throws ServiceException { + return stub.getClusterStatus(controller, request); + } + + @Override + public MasterProtos.SetQuotaResponse setQuota( + RpcController controller, MasterProtos.SetQuotaRequest request) + throws ServiceException { + return stub.setQuota(controller, request); + } + + @Override + public MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp( + RpcController controller, MasterProtos.MajorCompactionTimestampRequest request) + throws ServiceException { + return stub.getLastMajorCompactionTimestamp(controller, request); + } + + @Override + public MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion( + RpcController controller, MasterProtos.MajorCompactionTimestampForRegionRequest request) + throws ServiceException { + return stub.getLastMajorCompactionTimestampForRegion(controller, request); + } + + @Override + public IsBalancerEnabledResponse isBalancerEnabled(RpcController controller, + IsBalancerEnabledRequest request) throws ServiceException { + return stub.isBalancerEnabled(controller, request); + } + }; + } + + + private static void release(MasterServiceState mss) { + if (mss != null && mss.connection != null) { + ((ConnectionImplementation)mss.connection).releaseMaster(mss); + } + } + + private boolean isKeepAliveMasterConnectedAndRunning(MasterServiceState mss) { + if (mss.getStub() == null){ + return false; + } + try { + return mss.isMasterRunning(); + } catch (UndeclaredThrowableException e) { + // It's somehow messy, but we can receive exceptions such as + // java.net.ConnectException but they're not declared. So we catch it... + LOG.info("Master connection is not running anymore", e.getUndeclaredThrowable()); + return false; + } catch (ServiceException se) { + LOG.warn("Checking master connection", se); + return false; + } + } + + void releaseMaster(MasterServiceState mss) { + if (mss.getStub() == null) return; + synchronized (masterAndZKLock) { + --mss.userCount; + } + } + + private void closeMasterService(MasterServiceState mss) { + if (mss.getStub() != null) { + LOG.info("Closing master protocol: " + mss); + mss.clearStub(); + } + mss.userCount = 0; + } + + /** + * Immediate close of the shared master. Can be by the delayed close or when closing the + * connection itself. + */ + private void closeMaster() { + synchronized (masterAndZKLock) { + closeMasterService(masterServiceState); + } + } + + void updateCachedLocation(HRegionInfo hri, ServerName source, + ServerName serverName, long seqNum) { + HRegionLocation newHrl = new HRegionLocation(hri, serverName, seqNum); + cacheLocation(hri.getTable(), source, newHrl); + } + + @Override + public void deleteCachedRegionLocation(final HRegionLocation location) { + metaCache.clearCache(location); + } + + @Override + public void updateCachedLocations(final TableName tableName, byte[] rowkey, + final Object exception, final HRegionLocation source) { + assert source != null; + updateCachedLocations(tableName, source.getRegionInfo().getRegionName() + , rowkey, exception, source.getServerName()); + } + + /** + * Update the location with the new value (if the exception is a RegionMovedException) + * or delete it from the cache. Does nothing if we can be sure from the exception that + * the location is still accurate, or if the cache has already been updated. 
+   * @param exception an object (to simplify user code) on which we will try to find a nested
+   *   or wrapped or both RegionMovedException
+   * @param source server that is the source of the location update.
+   */
+  @Override
+  public void updateCachedLocations(final TableName tableName, byte[] regionName, byte[] rowkey,
+      final Object exception, final ServerName source) {
+    if (rowkey == null || tableName == null) {
+      LOG.warn("Coding error, see method javadoc. row=" + (rowkey == null ? "null" : rowkey) +
+          ", tableName=" + (tableName == null ? "null" : tableName));
+      return;
+    }
+
+    if (source == null) {
+      // This should not happen, but let's secure ourselves.
+      return;
+    }
+
+    if (regionName == null) {
+      // we do not know which region, so just remove the cache entry for the row and server
+      metaCache.clearCache(tableName, rowkey, source);
+      return;
+    }
+
+    // Is it something we have already updated?
+    final RegionLocations oldLocations = getCachedLocation(tableName, rowkey);
+    HRegionLocation oldLocation = null;
+    if (oldLocations != null) {
+      oldLocation = oldLocations.getRegionLocationByRegionName(regionName);
+    }
+    if (oldLocation == null || !source.equals(oldLocation.getServerName())) {
+      // There is no such location in the cache (it's been removed already) or
+      // the cache has already been refreshed with a different location. => nothing to do
+      return;
+    }
+
+    HRegionInfo regionInfo = oldLocation.getRegionInfo();
+    Throwable cause = findException(exception);
+    if (cause != null) {
+      if (cause instanceof RegionTooBusyException || cause instanceof RegionOpeningException) {
+        // We know that the region is still on this region server
+        return;
+      }
+
+      if (cause instanceof RegionMovedException) {
+        RegionMovedException rme = (RegionMovedException) cause;
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Region " + regionInfo.getRegionNameAsString() + " moved to " +
+              rme.getHostname() + ":" + rme.getPort() +
+              " according to " + source.getHostAndPort());
+        }
+        // We know that the region is no longer on this region server, but we know
+        // the new location.
+        updateCachedLocation(
+            regionInfo, source, rme.getServerName(), rme.getLocationSeqNum());
+        return;
+      }
+    }
+
+    // If we're here, it means that we cannot be sure about the location, so we remove it from
+    // the cache. Do not send the source because source can be a new server in the same host:port
+    metaCache.clearCache(regionInfo);
+  }
+
+  @Override
+  public void updateCachedLocations(final byte[] tableName, byte[] rowkey,
+      final Object exception, final HRegionLocation source) {
+    updateCachedLocations(TableName.valueOf(tableName), rowkey, exception, source);
+  }
+
+  /**
+   * @deprecated since 0.96 - Use {@link org.apache.hadoop.hbase.client.HTableInterface#batch} instead
+   */
+  @Override
+  @Deprecated
+  public void processBatch(List list,
+      final TableName tableName,
+      ExecutorService pool,
+      Object[] results) throws IOException, InterruptedException {
+    // This belongs in HTable!!! Not in here.
St.Ack + + // results must be the same size as list + if (results.length != list.size()) { + throw new IllegalArgumentException( + "argument results must be the same size as argument list"); + } + processBatchCallback(list, tableName, pool, results, null); + } + + /** + * @deprecated Unsupported API + */ + @Override + @Deprecated + public void processBatch(List list, + final byte[] tableName, + ExecutorService pool, + Object[] results) throws IOException, InterruptedException { + processBatch(list, TableName.valueOf(tableName), pool, results); + } + + /** + * Send the queries in parallel on the different region servers. Retries on failures. + * If the method returns it means that there is no error, and the 'results' array will + * contain no exception. On error, an exception is thrown, and the 'results' array will + * contain results and exceptions. + * @deprecated since 0.96 - Use {@link org.apache.hadoop.hbase.client.HTable#processBatchCallback} instead + */ + @Override + @Deprecated + public void processBatchCallback( + List list, + TableName tableName, + ExecutorService pool, + Object[] results, + Batch.Callback callback) + throws IOException, InterruptedException { + + AsyncProcess.AsyncRequestFuture ars = this.asyncProcess.submitAll( + pool, tableName, list, callback, results); + ars.waitUntilDone(); + if (ars.hasError()) { + throw ars.getErrors(); + } + } + + /** + * @deprecated Unsupported API + */ + @Override + @Deprecated + public void processBatchCallback( + List list, + byte[] tableName, + ExecutorService pool, + Object[] results, + Batch.Callback callback) + throws IOException, InterruptedException { + processBatchCallback(list, TableName.valueOf(tableName), pool, results, callback); + } + + // For tests to override. + protected AsyncProcess createAsyncProcess(Configuration conf) { + // No default pool available. + return new AsyncProcess(this, conf, this.batchPool, + RpcRetryingCallerFactory.instantiate(conf, this.getStatisticsTracker()), false, + RpcControllerFactory.instantiate(conf)); + } + + @Override + public AsyncProcess getAsyncProcess() { + return asyncProcess; + } + + @Override + public ServerStatisticTracker getStatisticsTracker() { + return this.stats; + } + + @Override + public ClientBackoffPolicy getBackoffPolicy() { + return this.backoffPolicy; + } + + /* + * Return the number of cached region for a table. It will only be called + * from a unit test. 
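The deprecated processBatch()/processBatchCallback() methods above are now thin wrappers over AsyncProcess.submitAll() plus waitUntilDone(), and their javadoc points callers at the table-level batch API instead. A hedged usage sketch of that replacement path is below; the table name, column values and the surrounding class are invented, a live Connection is assumed to be passed in, and the calls shown are the public Table API this patch series targets rather than anything added by the patch itself.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchSketch {
  // Usage sketch only: the non-deprecated path that replaces processBatch()/processBatchCallback().
  static void runBatch(Connection connection) throws Exception {
    Table table = connection.getTable(TableName.valueOf("mytable"));
    try {
      List<Row> actions = new ArrayList<Row>();
      actions.add(new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v1")));
      actions.add(new Get(Bytes.toBytes("row2")));
      Object[] results = new Object[actions.size()];
      // Drives AsyncProcess internally; each slot ends up holding a Result or an exception.
      table.batch(actions, results);
    } finally {
      table.close();
    }
  }
}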
+ */ + @VisibleForTesting + int getNumberOfCachedRegionLocations(final TableName tableName) { + return metaCache.getNumberOfCachedRegionLocations(tableName); + } + + /** + * @deprecated always return false since 0.99 + */ + @Override + @Deprecated + public void setRegionCachePrefetch(final TableName tableName, final boolean enable) { + } + + /** + * @deprecated always return false since 0.99 + */ + @Override + @Deprecated + public void setRegionCachePrefetch(final byte[] tableName, + final boolean enable) { + } + + /** + * @deprecated always return false since 0.99 + */ + @Override + @Deprecated + public boolean getRegionCachePrefetch(TableName tableName) { + return false; + } + + /** + * @deprecated always return false since 0.99 + */ + @Override + @Deprecated + public boolean getRegionCachePrefetch(byte[] tableName) { + return false; + } + + @Override + public void abort(final String msg, Throwable t) { + if (t instanceof KeeperException.SessionExpiredException + && keepAliveZookeeper != null) { + synchronized (masterAndZKLock) { + if (keepAliveZookeeper != null) { + LOG.warn("This client just lost it's session with ZooKeeper," + + " closing it." + + " It will be recreated next time someone needs it", t); + closeZooKeeperWatcher(); + } + } + } else { + if (t != null) { + LOG.fatal(msg, t); + } else { + LOG.fatal(msg); + } + this.aborted = true; + close(); + this.closed = true; + } + } + + @Override + public boolean isClosed() { + return this.closed; + } + + @Override + public boolean isAborted(){ + return this.aborted; + } + + @Override + public int getCurrentNrHRS() throws IOException { + return this.registry.getCurrentNrHRS(); + } + + /** + * Increment this client's reference count. + */ + void incCount() { + ++refCount; + } + + /** + * Decrement this client's reference count. + */ + void decCount() { + if (refCount > 0) { + --refCount; + } + } + + @Override + public void close() { + if (this.closed) { + return; + } + closeMaster(); + shutdownPools(); + this.closed = true; + closeZooKeeperWatcher(); + this.stubs.clear(); + if (clusterStatusListener != null) { + clusterStatusListener.close(); + } + if (rpcClient != null) { + rpcClient.close(); + } + } + + /** + * Close the connection for good, regardless of what the current value of + * {@link #refCount} is. Ideally, {@link #refCount} should be zero at this + * point, which would be the case if all of its consumers close the + * connection. However, on the off chance that someone is unable to close + * the connection, perhaps because it bailed out prematurely, the method + * below will ensure that this {@link org.apache.hadoop.hbase.client.HConnection} instance is cleaned up. + * Caveat: The JVM may take an unknown amount of time to call finalize on an + * unreachable object, so our hope is that every consumer cleans up after + * itself, like any good citizen. 
+ */ + @Override + protected void finalize() throws Throwable { + super.finalize(); + // Pretend as if we are about to release the last remaining reference + refCount = 1; + close(); + } + + /** + * @deprecated Use {@link org.apache.hadoop.hbase.client.Admin#listTables()} instead + */ + @Deprecated + @Override + public HTableDescriptor[] listTables() throws IOException { + MasterKeepAliveConnection master = getKeepAliveMasterService(); + try { + MasterProtos.GetTableDescriptorsRequest req = + RequestConverter.buildGetTableDescriptorsRequest((List)null); + return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req)); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } finally { + master.close(); + } + } + + /** + * @deprecated Use {@link org.apache.hadoop.hbase.client.Admin#listTableNames()} instead + */ + @Deprecated + @Override + public String[] getTableNames() throws IOException { + TableName[] tableNames = listTableNames(); + String[] result = new String[tableNames.length]; + for (int i = 0; i < tableNames.length; i++) { + result[i] = tableNames[i].getNameAsString(); + } + return result; + } + + /** + * @deprecated Use {@link org.apache.hadoop.hbase.client.Admin#listTableNames()} instead + */ + @Deprecated + @Override + public TableName[] listTableNames() throws IOException { + MasterKeepAliveConnection master = getKeepAliveMasterService(); + try { + return ProtobufUtil.getTableNameArray(master.getTableNames(null, + MasterProtos.GetTableNamesRequest.newBuilder().build()) + .getTableNamesList()); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } finally { + master.close(); + } + } + + /** + * @deprecated Use {@link org.apache.hadoop.hbase.client.Admin#getTableDescriptorsByTableName(java.util.List)} instead + */ + @Deprecated + @Override + public HTableDescriptor[] getHTableDescriptorsByTableName( + List tableNames) throws IOException { + if (tableNames == null || tableNames.isEmpty()) return new HTableDescriptor[0]; + MasterKeepAliveConnection master = getKeepAliveMasterService(); + try { + MasterProtos.GetTableDescriptorsRequest req = + RequestConverter.buildGetTableDescriptorsRequest(tableNames); + return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req)); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } finally { + master.close(); + } + } + + /** + * @deprecated Use {@link org.apache.hadoop.hbase.client.Admin#getTableDescriptorsByTableName(java.util.List)} instead + */ + @Deprecated + @Override + public HTableDescriptor[] getHTableDescriptors( + List names) throws IOException { + List tableNames = new ArrayList(names.size()); + for(String name : names) { + tableNames.add(TableName.valueOf(name)); + } + + return getHTableDescriptorsByTableName(tableNames); + } + + @Override + public NonceGenerator getNonceGenerator() { + return nonceGenerator; + } + + /** + * Connects to the master to get the table descriptor. + * @param tableName table name + * @throws java.io.IOException if the connection to master fails or if the table + * is not found. 
+ * @deprecated Use {@link org.apache.hadoop.hbase.client.Admin#getTableDescriptor(org.apache.hadoop.hbase.TableName)} instead + */ + @Deprecated + @Override + public HTableDescriptor getHTableDescriptor(final TableName tableName) + throws IOException { + if (tableName == null) return null; + MasterKeepAliveConnection master = getKeepAliveMasterService(); + MasterProtos.GetTableDescriptorsResponse htds; + try { + MasterProtos.GetTableDescriptorsRequest req = + RequestConverter.buildGetTableDescriptorsRequest(tableName); + htds = master.getTableDescriptors(null, req); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } finally { + master.close(); + } + if (!htds.getTableSchemaList().isEmpty()) { + return HTableDescriptor.convert(htds.getTableSchemaList().get(0)); + } + throw new TableNotFoundException(tableName.getNameAsString()); + } + + /** + * @deprecated Use {@link org.apache.hadoop.hbase.client.Admin#getTableDescriptor(org.apache.hadoop.hbase.TableName)} instead + */ + @Deprecated + @Override + public HTableDescriptor getHTableDescriptor(final byte[] tableName) + throws IOException { + return getHTableDescriptor(TableName.valueOf(tableName)); + } + + @Override + public TableState getTableState(TableName tableName) throws IOException { + if (this.closed) throw new IOException(toString() + " closed"); + + TableState tableState = MetaTableAccessor.getTableState(this, tableName); + if (tableState == null) + throw new TableNotFoundException(tableName); + return tableState; + } + + @Override + public RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf) { + return RpcRetryingCallerFactory + .instantiate(conf, this.interceptor, this.getStatisticsTracker()); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java deleted file mode 100644 index e986156019a..00000000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java +++ /dev/null @@ -1,2644 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
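From here the hunk deletes ConnectionManager.java outright, taking its static HConnection factories and cached-connection bookkeeping with it; callers are expected to create and close connections explicitly through ConnectionFactory. A hedged usage sketch of that replacement follows, with an invented table and row; the lifecycle shown (caller owns the connection, no implicit caching) is the point of the change, while the specific table access is only an example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionFactorySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // No implicit caching or reference counting here: the caller owns the lifecycle.
    Connection connection = ConnectionFactory.createConnection(conf);
    try {
      Table table = connection.getTable(TableName.valueOf("mytable"));
      try {
        Result r = table.get(new Get(Bytes.toBytes("row1")));
        System.out.println("cells in row1: " + r.size());
      } finally {
        table.close();
      }
    } finally {
      connection.close();
    }
  }
}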
- */ -package org.apache.hadoop.hbase.client; - -import javax.annotation.Nullable; -import java.io.Closeable; -import java.io.IOException; -import java.io.InterruptedIOException; -import java.lang.reflect.UndeclaredThrowableException; -import java.util.ArrayList; -import java.util.Date; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.NavigableMap; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - -import com.google.common.annotations.VisibleForTesting; -import com.google.protobuf.BlockingRpcChannel; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.DoNotRetryIOException; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MasterNotRunningException; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.RegionLocations; -import org.apache.hadoop.hbase.RegionTooBusyException; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNotEnabledException; -import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.AsyncProcess.AsyncRequestFuture; -import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy; -import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory; -import org.apache.hadoop.hbase.client.coprocessor.Batch; -import org.apache.hadoop.hbase.exceptions.RegionMovedException; -import org.apache.hadoop.hbase.exceptions.RegionOpeningException; -import org.apache.hadoop.hbase.ipc.RpcClient; -import org.apache.hadoop.hbase.ipc.RpcClientFactory; -import org.apache.hadoop.hbase.ipc.RpcControllerFactory; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.RequestConverter; -import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest; -import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse; -import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; -import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse; -import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest; -import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse; -import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.security.UserProvider; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.ExceptionUtil; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; -import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.zookeeper.KeeperException; - -/** - * An internal, non-instantiable class that manages creation of {@link HConnection}s. - */ -@SuppressWarnings("serial") -@InterfaceAudience.Private -// NOTE: DO NOT make this class public. It was made package-private on purpose. -final class ConnectionManager { - static final Log LOG = LogFactory.getLog(ConnectionManager.class); - - public static final String RETRIES_BY_SERVER_KEY = "hbase.client.retries.by.server"; - private static final String CLIENT_NONCES_ENABLED_KEY = "hbase.client.nonces.enabled"; - - // An LRU Map of HConnectionKey -> HConnection (TableServer). All - // access must be synchronized. This map is not private because tests - // need to be able to tinker with it. - static final Map CONNECTION_INSTANCES; - - public static final int MAX_CACHED_CONNECTION_INSTANCES; - - /** - * Global nonceGenerator shared per client.Currently there's no reason to limit its scope. - * Once it's set under nonceGeneratorCreateLock, it is never unset or changed. - */ - private static volatile NonceGenerator nonceGenerator = null; - /** The nonce generator lock. Only taken when creating HConnection, which gets a private copy. */ - private static Object nonceGeneratorCreateLock = new Object(); - - static { - // We set instances to one more than the value specified for {@link - // HConstants#ZOOKEEPER_MAX_CLIENT_CNXNS}. By default, the zk default max - // connections to the ensemble from the one client is 30, so in that case we - // should run into zk issues before the LRU hit this value of 31. - MAX_CACHED_CONNECTION_INSTANCES = HBaseConfiguration.create().getInt( - HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS, HConstants.DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS) + 1; - CONNECTION_INSTANCES = new LinkedHashMap( - (int) (MAX_CACHED_CONNECTION_INSTANCES / 0.75F) + 1, 0.75F, true) { - @Override - protected boolean removeEldestEntry( - Map.Entry eldest) { - return size() > MAX_CACHED_CONNECTION_INSTANCES; - } - }; - } - - /** Dummy nonce generator for disabled nonces. */ - static class NoNonceGenerator implements NonceGenerator { - @Override - public long getNonceGroup() { - return HConstants.NO_NONCE; - } - @Override - public long newNonce() { - return HConstants.NO_NONCE; - } - } - - /* - * Non-instantiable. 
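The CONNECTION_INSTANCES map being deleted above is an access-ordered LinkedHashMap turned into a bounded LRU cache by overriding removeEldestEntry(), sized to one more than the ZooKeeper client connection limit so the cache is never the first thing to hit that limit. A small self-contained sketch of the idiom (class name and bound are illustrative, not taken from the patch):

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative sketch only: a bounded LRU cache built from an access-ordered LinkedHashMap,
// the same idiom the deleted CONNECTION_INSTANCES map used. Not thread safe by itself.
@SuppressWarnings("serial")
final class LruCache<K, V> extends LinkedHashMap<K, V> {
  private final int maxEntries;

  LruCache(int maxEntries) {                              // e.g. max ZK client connections + 1
    super((int) (maxEntries / 0.75f) + 1, 0.75f, true);   // 'true' = access order, i.e. LRU
    this.maxEntries = maxEntries;
  }

  @Override
  protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
    return size() > maxEntries;                           // evict the least recently used entry
  }
}

As the deleted code notes, LinkedHashMap is not thread safe, which is why every access to the connection cache is wrapped in synchronized (CONNECTION_INSTANCES).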
- */ - private ConnectionManager() { - super(); - } - - /** - * @param conn The connection for which to replace the generator. - * @param cnm Replaces the nonce generator used, for testing. - * @return old nonce generator. - */ - @VisibleForTesting - static NonceGenerator injectNonceGeneratorForTesting( - ClusterConnection conn, NonceGenerator cnm) { - HConnectionImplementation connImpl = (HConnectionImplementation)conn; - NonceGenerator ng = connImpl.getNonceGenerator(); - LOG.warn("Nonce generator is being replaced by test code for " + cnm.getClass().getName()); - connImpl.nonceGenerator = cnm; - return ng; - } - - /** - * Get the connection that goes with the passed conf configuration instance. - * If no current connection exists, method creates a new connection and keys it using - * connection-specific properties from the passed {@link Configuration}; see - * {@link HConnectionKey}. - * @param conf configuration - * @return HConnection object for conf - * @throws ZooKeeperConnectionException - * @deprecated connection caching is going away. - */ - @Deprecated - public static HConnection getConnection(final Configuration conf) throws IOException { - return getConnectionInternal(conf); - } - - - static ClusterConnection getConnectionInternal(final Configuration conf) - throws IOException { - HConnectionKey connectionKey = new HConnectionKey(conf); - synchronized (CONNECTION_INSTANCES) { - HConnectionImplementation connection = CONNECTION_INSTANCES.get(connectionKey); - if (connection == null) { - connection = (HConnectionImplementation)createConnection(conf, true); - CONNECTION_INSTANCES.put(connectionKey, connection); - } else if (connection.isClosed()) { - ConnectionManager.deleteConnection(connectionKey, true); - connection = (HConnectionImplementation)createConnection(conf, true); - CONNECTION_INSTANCES.put(connectionKey, connection); - } - connection.incCount(); - return connection; - } - } - - /** - * Create a new HConnection instance using the passed conf instance. - *
Note: This bypasses the usual HConnection life cycle management done by - * {@link #getConnection(Configuration)}. The caller is responsible for - * calling {@link HConnection#close()} on the returned connection instance. - * - * This is the recommended way to create HConnections. - * {@code - * HConnection connection = ConnectionManagerInternal.createConnection(conf); - * HTableInterface table = connection.getTable("mytable"); - * table.get(...); - * ... - * table.close(); - * connection.close(); - * } - * - * @param conf configuration - * @return HConnection object for conf - * @throws ZooKeeperConnectionException - */ - public static HConnection createConnection(Configuration conf) throws IOException { - return createConnectionInternal(conf); - } - - static ClusterConnection createConnectionInternal(Configuration conf) throws IOException { - UserProvider provider = UserProvider.instantiate(conf); - return createConnection(conf, false, null, provider.getCurrent()); - } - - /** - * Create a new HConnection instance using the passed conf instance. - *
Note: This bypasses the usual HConnection life cycle management done by - * {@link #getConnection(Configuration)}. The caller is responsible for - * calling {@link HConnection#close()} on the returned connection instance. - * This is the recommended way to create HConnections. - * {@code - * ExecutorService pool = ...; - * HConnection connection = HConnectionManager.createConnection(conf, pool); - * HTableInterface table = connection.getTable("mytable"); - * table.get(...); - * ... - * table.close(); - * connection.close(); - * } - * @param conf configuration - * @param pool the thread pool to use for batch operation in HTables used via this HConnection - * @return HConnection object for conf - * @throws ZooKeeperConnectionException - */ - public static HConnection createConnection(Configuration conf, ExecutorService pool) - throws IOException { - UserProvider provider = UserProvider.instantiate(conf); - return createConnection(conf, false, pool, provider.getCurrent()); - } - - /** - * Create a new HConnection instance using the passed conf instance. - *
Note: This bypasses the usual HConnection life cycle management done by - * {@link #getConnection(Configuration)}. The caller is responsible for - * calling {@link HConnection#close()} on the returned connection instance. - * This is the recommended way to create HConnections. - * {@code - * ExecutorService pool = ...; - * HConnection connection = HConnectionManager.createConnection(conf, pool); - * HTableInterface table = connection.getTable("mytable"); - * table.get(...); - * ... - * table.close(); - * connection.close(); - * } - * @param conf configuration - * @param user the user the connection is for - * @return HConnection object for conf - * @throws ZooKeeperConnectionException - */ - public static HConnection createConnection(Configuration conf, User user) - throws IOException { - return createConnection(conf, false, null, user); - } - - /** - * Create a new HConnection instance using the passed conf instance. - *
Note: This bypasses the usual HConnection life cycle management done by - * {@link #getConnection(Configuration)}. The caller is responsible for - * calling {@link HConnection#close()} on the returned connection instance. - * This is the recommended way to create HConnections. - * {@code - * ExecutorService pool = ...; - * HConnection connection = HConnectionManager.createConnection(conf, pool); - * HTableInterface table = connection.getTable("mytable"); - * table.get(...); - * ... - * table.close(); - * connection.close(); - * } - * @param conf configuration - * @param pool the thread pool to use for batch operation in HTables used via this HConnection - * @param user the user the connection is for - * @return HConnection object for conf - * @throws ZooKeeperConnectionException - */ - public static HConnection createConnection(Configuration conf, ExecutorService pool, User user) - throws IOException { - return createConnection(conf, false, pool, user); - } - - /** - * @deprecated instead use one of the {@link ConnectionFactory#createConnection()} methods. - */ - @Deprecated - static HConnection createConnection(final Configuration conf, final boolean managed) - throws IOException { - UserProvider provider = UserProvider.instantiate(conf); - return createConnection(conf, managed, null, provider.getCurrent()); - } - - /** - * @deprecated instead use one of the {@link ConnectionFactory#createConnection()} methods. - */ - @Deprecated - static ClusterConnection createConnection(final Configuration conf, final boolean managed, - final ExecutorService pool, final User user) - throws IOException { - return (ClusterConnection) ConnectionFactory.createConnection(conf, managed, pool, user); - } - - /** - * Delete connection information for the instance specified by passed configuration. - * If there are no more references to the designated connection connection, this method will - * then close connection to the zookeeper ensemble and let go of all associated resources. - * - * @param conf configuration whose identity is used to find {@link HConnection} instance. - * @deprecated connection caching is going away. - */ - @Deprecated - public static void deleteConnection(Configuration conf) { - deleteConnection(new HConnectionKey(conf), false); - } - - /** - * Cleanup a known stale connection. - * This will then close connection to the zookeeper ensemble and let go of all resources. - * - * @param connection - * @deprecated connection caching is going away. - */ - @Deprecated - public static void deleteStaleConnection(HConnection connection) { - deleteConnection(connection, true); - } - - /** - * Delete information for all connections. Close or not the connection, depending on the - * staleConnection boolean and the ref count. By default, you should use it with - * staleConnection to true. - * @deprecated connection caching is going away. - */ - @Deprecated - public static void deleteAllConnections(boolean staleConnection) { - synchronized (CONNECTION_INSTANCES) { - Set connectionKeys = new HashSet(); - connectionKeys.addAll(CONNECTION_INSTANCES.keySet()); - for (HConnectionKey connectionKey : connectionKeys) { - deleteConnection(connectionKey, staleConnection); - } - CONNECTION_INSTANCES.clear(); - } - } - - /** - * Delete information for all connections.. - * @deprecated kept for backward compatibility, but the behavior is broken. HBASE-8983 - */ - @Deprecated - public static void deleteAllConnections() { - deleteAllConnections(false); - } - - /** - * @deprecated connection caching is going away. 
- */ - @Deprecated - private static void deleteConnection(HConnection connection, boolean staleConnection) { - synchronized (CONNECTION_INSTANCES) { - for (Entry e: CONNECTION_INSTANCES.entrySet()) { - if (e.getValue() == connection) { - deleteConnection(e.getKey(), staleConnection); - break; - } - } - } - } - - /** - * @deprecated connection caching is going away. -˙ */ - @Deprecated - private static void deleteConnection(HConnectionKey connectionKey, boolean staleConnection) { - synchronized (CONNECTION_INSTANCES) { - HConnectionImplementation connection = CONNECTION_INSTANCES.get(connectionKey); - if (connection != null) { - connection.decCount(); - if (connection.isZeroReference() || staleConnection) { - CONNECTION_INSTANCES.remove(connectionKey); - connection.internalClose(); - } - } else { - LOG.error("Connection not found in the list, can't delete it "+ - "(connection key=" + connectionKey + "). May be the key was modified?", new Exception()); - } - } - } - - - /** - * This convenience method invokes the given {@link HConnectable#connect} - * implementation using a {@link HConnection} instance that lasts just for the - * duration of the invocation. - * - * @param the return type of the connect method - * @param connectable the {@link HConnectable} instance - * @return the value returned by the connect method - * @throws IOException - */ - @InterfaceAudience.Private - public static T execute(HConnectable connectable) throws IOException { - if (connectable == null || connectable.conf == null) { - return null; - } - Configuration conf = connectable.conf; - HConnection connection = getConnection(conf); - boolean connectSucceeded = false; - try { - T returnValue = connectable.connect(connection); - connectSucceeded = true; - return returnValue; - } finally { - try { - connection.close(); - } catch (Exception e) { - ExceptionUtil.rethrowIfInterrupt(e); - if (connectSucceeded) { - throw new IOException("The connection to " + connection - + " could not be deleted.", e); - } - } - } - } - - /** Encapsulates connection to zookeeper and regionservers.*/ - @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION", - justification="Access to the conncurrent hash map is under a lock so should be fine.") - static class HConnectionImplementation implements ClusterConnection, Closeable { - static final Log LOG = LogFactory.getLog(HConnectionImplementation.class); - private final long pause; - private final boolean useMetaReplicas; - private final int numTries; - final int rpcTimeout; - private NonceGenerator nonceGenerator = null; - private final AsyncProcess asyncProcess; - // single tracker per connection - private final ServerStatisticTracker stats; - - private volatile boolean closed; - private volatile boolean aborted; - - // package protected for the tests - ClusterStatusListener clusterStatusListener; - - - private final Object metaRegionLock = new Object(); - - // We have a single lock for master & zk to prevent deadlocks. Having - // one lock for ZK and one lock for master is not possible: - // When creating a connection to master, we need a connection to ZK to get - // its address. But another thread could have taken the ZK lock, and could - // be waiting for the master lock => deadlock. 
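The execute(HConnectable) helper being removed above encodes a small but deliberate pattern: run a callback against a borrowed connection, always close it in finally, and only propagate a close failure when the callback itself succeeded, so the primary exception is never masked. A generic sketch of that pattern with invented types:

import java.io.Closeable;
import java.io.IOException;

// Illustrative sketch only: scoped use of a closable resource where a failure during
// close() is reported only if the actual work succeeded, so the primary error is not masked.
final class Scoped {
  interface Work<R, T extends Closeable> {
    R run(T resource) throws IOException;
  }

  static <R, T extends Closeable> R with(T resource, Work<R, T> work) throws IOException {
    boolean workSucceeded = false;
    try {
      R result = work.run(resource);
      workSucceeded = true;
      return result;
    } finally {
      try {
        resource.close();
      } catch (IOException e) {
        if (workSucceeded) {
          throw new IOException("Could not close " + resource, e);
        }
        // otherwise swallow: the exception from run() is already propagating
      }
    }
  }
}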
- private final Object masterAndZKLock = new Object(); - - private long keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE; - - // thread executor shared by all HTableInterface instances created - // by this connection - private volatile ExecutorService batchPool = null; - private volatile boolean cleanupPool = false; - - private final Configuration conf; - - // cache the configuration value for tables so that we can avoid calling - // the expensive Configuration to fetch the value multiple times. - private final TableConfiguration tableConfig; - - // Client rpc instance. - private RpcClient rpcClient; - - private MetaCache metaCache = new MetaCache(); - - private int refCount; - - // indicates whether this connection's life cycle is managed (by us) - private boolean managed; - - private User user; - - private RpcRetryingCallerFactory rpcCallerFactory; - - private RpcControllerFactory rpcControllerFactory; - - private final RetryingCallerInterceptor interceptor; - - /** - * Cluster registry of basic info such as clusterid and meta region location. - */ - Registry registry; - - private final ClientBackoffPolicy backoffPolicy; - - HConnectionImplementation(Configuration conf, boolean managed) throws IOException { - this(conf, managed, null, null); - } - - /** - * constructor - * @param conf Configuration object - * @param managed If true, does not do full shutdown on close; i.e. cleanup of connection - * to zk and shutdown of all services; we just close down the resources this connection was - * responsible for and decrement usage counters. It is up to the caller to do the full - * cleanup. It is set when we want have connection sharing going on -- reuse of zk connection, - * and cached region locations, established regionserver connections, etc. When connections - * are shared, we have reference counting going on and will only do full cleanup when no more - * users of an HConnectionImplementation instance. - */ - HConnectionImplementation(Configuration conf, boolean managed, - ExecutorService pool, User user) throws IOException { - this(conf); - this.user = user; - this.batchPool = pool; - this.managed = managed; - this.registry = setupRegistry(); - retrieveClusterId(); - - this.rpcClient = RpcClientFactory.createClient(this.conf, this.clusterId); - this.rpcControllerFactory = RpcControllerFactory.instantiate(conf); - - // Do we publish the status? - boolean shouldListen = conf.getBoolean(HConstants.STATUS_PUBLISHED, - HConstants.STATUS_PUBLISHED_DEFAULT); - Class listenerClass = - conf.getClass(ClusterStatusListener.STATUS_LISTENER_CLASS, - ClusterStatusListener.DEFAULT_STATUS_LISTENER_CLASS, - ClusterStatusListener.Listener.class); - if (shouldListen) { - if (listenerClass == null) { - LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " + - ClusterStatusListener.STATUS_LISTENER_CLASS + " is not set - not listening status"); - } else { - clusterStatusListener = new ClusterStatusListener( - new ClusterStatusListener.DeadServerHandler() { - @Override - public void newDead(ServerName sn) { - clearCaches(sn); - rpcClient.cancelConnections(sn); - } - }, conf, listenerClass); - } - } - } - - /** - * For tests. 
- */ - protected HConnectionImplementation(Configuration conf) { - this.conf = conf; - this.tableConfig = new TableConfiguration(conf); - this.closed = false; - this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, - HConstants.DEFAULT_HBASE_CLIENT_PAUSE); - this.useMetaReplicas = conf.getBoolean(HConstants.USE_META_REPLICAS, - HConstants.DEFAULT_USE_META_REPLICAS); - this.numTries = tableConfig.getRetriesNumber(); - this.rpcTimeout = conf.getInt( - HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT); - if (conf.getBoolean(CLIENT_NONCES_ENABLED_KEY, true)) { - synchronized (nonceGeneratorCreateLock) { - if (ConnectionManager.nonceGenerator == null) { - ConnectionManager.nonceGenerator = new PerClientRandomNonceGenerator(); - } - this.nonceGenerator = ConnectionManager.nonceGenerator; - } - } else { - this.nonceGenerator = new NoNonceGenerator(); - } - stats = ServerStatisticTracker.create(conf); - this.asyncProcess = createAsyncProcess(this.conf); - this.interceptor = (new RetryingCallerInterceptorFactory(conf)).build(); - this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(conf, interceptor, this.stats); - this.backoffPolicy = ClientBackoffPolicyFactory.create(conf); - } - - @Override - public HTableInterface getTable(String tableName) throws IOException { - return getTable(TableName.valueOf(tableName)); - } - - @Override - public HTableInterface getTable(byte[] tableName) throws IOException { - return getTable(TableName.valueOf(tableName)); - } - - @Override - public HTableInterface getTable(TableName tableName) throws IOException { - return getTable(tableName, getBatchPool()); - } - - @Override - public HTableInterface getTable(String tableName, ExecutorService pool) throws IOException { - return getTable(TableName.valueOf(tableName), pool); - } - - @Override - public HTableInterface getTable(byte[] tableName, ExecutorService pool) throws IOException { - return getTable(TableName.valueOf(tableName), pool); - } - - @Override - public HTableInterface getTable(TableName tableName, ExecutorService pool) throws IOException { - if (managed) { - throw new IOException("The connection has to be unmanaged."); - } - return new HTable(tableName, this, tableConfig, rpcCallerFactory, rpcControllerFactory, pool); - } - - @Override - public BufferedMutator getBufferedMutator(BufferedMutatorParams params) { - if (params.getTableName() == null) { - throw new IllegalArgumentException("TableName cannot be null."); - } - if (params.getPool() == null) { - params.pool(HTable.getDefaultExecutor(getConfiguration())); - } - if (params.getWriteBufferSize() == BufferedMutatorParams.UNSET) { - params.writeBufferSize(tableConfig.getWriteBufferSize()); - } - if (params.getMaxKeyValueSize() == BufferedMutatorParams.UNSET) { - params.maxKeyValueSize(tableConfig.getMaxKeyValueSize()); - } - return new BufferedMutatorImpl(this, rpcCallerFactory, rpcControllerFactory, params); - } - - @Override - public BufferedMutator getBufferedMutator(TableName tableName) { - return getBufferedMutator(new BufferedMutatorParams(tableName)); - } - - @Override - public RegionLocator getRegionLocator(TableName tableName) throws IOException { - return new HRegionLocator(tableName, this); - } - - @Override - public Admin getAdmin() throws IOException { - if (managed) { - throw new IOException("The connection has to be unmanaged."); - } - return new HBaseAdmin(this); - } - - private ExecutorService getBatchPool() { - if (batchPool == null) { - // shared HTable thread executor not yet initialized - 
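Editor's note: the removed getBufferedMutator(BufferedMutatorParams) above fills any unset parameters (pool, write buffer size, max key-value size) with connection-level defaults. A hedged sketch of driving the same API from application code, with a hypothetical table, column family and an illustrative 4 MB buffer:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.BufferedMutator;
    import org.apache.hadoop.hbase.client.BufferedMutatorParams;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BufferedMutatorUsage {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Parameters left unset here (pool, max key-value size) fall back to the
        // connection's defaults, as in the removed getBufferedMutator(params).
        BufferedMutatorParams params = new BufferedMutatorParams(TableName.valueOf("mytable")) // hypothetical
            .writeBufferSize(4 * 1024 * 1024); // illustrative 4 MB client-side buffer
        try (Connection connection = ConnectionFactory.createConnection(conf);
             BufferedMutator mutator = connection.getBufferedMutator(params)) {
          Put put = new Put(Bytes.toBytes("row1")); // hypothetical row/family/qualifier
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          mutator.mutate(put);
          mutator.flush(); // push buffered mutations to the cluster
        }
      }
    }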
synchronized (this) { - if (batchPool == null) { - int maxThreads = conf.getInt("hbase.hconnection.threads.max", 256); - int coreThreads = conf.getInt("hbase.hconnection.threads.core", 256); - if (maxThreads == 0) { - maxThreads = Runtime.getRuntime().availableProcessors() * 8; - } - if (coreThreads == 0) { - coreThreads = Runtime.getRuntime().availableProcessors() * 8; - } - long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60); - LinkedBlockingQueue workQueue = - new LinkedBlockingQueue(maxThreads * - conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, - HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS)); - ThreadPoolExecutor tpe = new ThreadPoolExecutor( - coreThreads, - maxThreads, - keepAliveTime, - TimeUnit.SECONDS, - workQueue, - Threads.newDaemonThreadFactory(toString() + "-shared-")); - tpe.allowCoreThreadTimeOut(true); - this.batchPool = tpe; - } - this.cleanupPool = true; - } - } - return this.batchPool; - } - - protected ExecutorService getCurrentBatchPool() { - return batchPool; - } - - private void shutdownBatchPool() { - if (this.cleanupPool && this.batchPool != null && !this.batchPool.isShutdown()) { - this.batchPool.shutdown(); - try { - if (!this.batchPool.awaitTermination(10, TimeUnit.SECONDS)) { - this.batchPool.shutdownNow(); - } - } catch (InterruptedException e) { - this.batchPool.shutdownNow(); - } - } - } - - /** - * @return The cluster registry implementation to use. - * @throws IOException - */ - private Registry setupRegistry() throws IOException { - return RegistryFactory.getRegistry(this); - } - - /** - * For tests only. - */ - @VisibleForTesting - RpcClient getRpcClient() { - return rpcClient; - } - - /** - * An identifier that will remain the same for a given connection. - */ - @Override - public String toString(){ - return "hconnection-0x" + Integer.toHexString(hashCode()); - } - - protected String clusterId = null; - - void retrieveClusterId() { - if (clusterId != null) return; - this.clusterId = this.registry.getClusterId(); - if (clusterId == null) { - clusterId = HConstants.CLUSTER_ID_DEFAULT; - LOG.debug("clusterid came back null, using default " + clusterId); - } - } - - @Override - public Configuration getConfiguration() { - return this.conf; - } - - private void checkIfBaseNodeAvailable(ZooKeeperWatcher zkw) - throws MasterNotRunningException { - String errorMsg; - try { - if (ZKUtil.checkExists(zkw, zkw.baseZNode) == -1) { - errorMsg = "The node " + zkw.baseZNode+" is not in ZooKeeper. " - + "It should have been written by the master. " - + "Check the value configured in 'zookeeper.znode.parent'. 
" - + "There could be a mismatch with the one configured in the master."; - LOG.error(errorMsg); - throw new MasterNotRunningException(errorMsg); - } - } catch (KeeperException e) { - errorMsg = "Can't get connection to ZooKeeper: " + e.getMessage(); - LOG.error(errorMsg); - throw new MasterNotRunningException(errorMsg, e); - } - } - - /** - * @return true if the master is running, throws an exception otherwise - * @throws MasterNotRunningException - if the master is not running - * @throws ZooKeeperConnectionException - * @deprecated this has been deprecated without a replacement - */ - @Deprecated - @Override - public boolean isMasterRunning() - throws MasterNotRunningException, ZooKeeperConnectionException { - // When getting the master connection, we check it's running, - // so if there is no exception, it means we've been able to get a - // connection on a running master - MasterKeepAliveConnection m = getKeepAliveMasterService(); - m.close(); - return true; - } - - @Override - public HRegionLocation getRegionLocation(final TableName tableName, - final byte [] row, boolean reload) - throws IOException { - return reload? relocateRegion(tableName, row): locateRegion(tableName, row); - } - - @Override - public HRegionLocation getRegionLocation(final byte[] tableName, - final byte [] row, boolean reload) - throws IOException { - return getRegionLocation(TableName.valueOf(tableName), row, reload); - } - - @Override - public boolean isTableEnabled(TableName tableName) throws IOException { - return getTableState(tableName).inStates(TableState.State.ENABLED); - } - - @Override - public boolean isTableEnabled(byte[] tableName) throws IOException { - return isTableEnabled(TableName.valueOf(tableName)); - } - - @Override - public boolean isTableDisabled(TableName tableName) throws IOException { - return getTableState(tableName).inStates(TableState.State.DISABLED); - } - - @Override - public boolean isTableDisabled(byte[] tableName) throws IOException { - return isTableDisabled(TableName.valueOf(tableName)); - } - - @Override - public boolean isTableAvailable(final TableName tableName) throws IOException { - return isTableAvailable(tableName, null); - } - - @Override - public boolean isTableAvailable(final byte[] tableName) throws IOException { - return isTableAvailable(TableName.valueOf(tableName)); - } - - @Override - public boolean isTableAvailable(final TableName tableName, @Nullable final byte[][] splitKeys) - throws IOException { - try { - if (!isTableEnabled(tableName)) { - LOG.debug("Table " + tableName + " not enabled"); - return false; - } - ClusterConnection connection = getConnectionInternal(getConfiguration()); - List> locations = MetaTableAccessor - .getTableRegionsAndLocations(connection, tableName, true); - int notDeployed = 0; - int regionCount = 0; - for (Pair pair : locations) { - HRegionInfo info = pair.getFirst(); - if (pair.getSecond() == null) { - if (LOG.isDebugEnabled()) { - LOG.debug("Table " + tableName + " has not deployed region " + pair.getFirst() - .getEncodedName()); - } - notDeployed++; - } else if (splitKeys != null - && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) { - for (byte[] splitKey : splitKeys) { - // Just check if the splitkey is available - if (Bytes.equals(info.getStartKey(), splitKey)) { - regionCount++; - break; - } - } - } else { - // Always empty start row should be counted - regionCount++; - } - } - if (notDeployed > 0) { - if (LOG.isDebugEnabled()) { - LOG.debug("Table " + tableName + " has " + notDeployed + " regions"); - } - 
return false; - } else if (splitKeys != null && regionCount != splitKeys.length + 1) { - if (LOG.isDebugEnabled()) { - LOG.debug("Table " + tableName + " expected to have " + (splitKeys.length + 1) - + " regions, but only " + regionCount + " available"); - } - return false; - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("Table " + tableName + " should be available"); - } - return true; - } - } catch (TableNotFoundException tnfe) { - LOG.warn("Table " + tableName + " not enabled, it is not exists"); - return false; - } - } - - @Override - public boolean isTableAvailable(final byte[] tableName, final byte[][] splitKeys) - throws IOException { - return isTableAvailable(TableName.valueOf(tableName), splitKeys); - } - - @Override - public HRegionLocation locateRegion(final byte[] regionName) throws IOException { - RegionLocations locations = locateRegion(HRegionInfo.getTable(regionName), - HRegionInfo.getStartKey(regionName), false, true); - return locations == null ? null : locations.getRegionLocation(); - } - - @Override - public boolean isDeadServer(ServerName sn) { - if (clusterStatusListener == null) { - return false; - } else { - return clusterStatusListener.isDeadServer(sn); - } - } - - @Override - public List locateRegions(final TableName tableName) - throws IOException { - return locateRegions(tableName, false, true); - } - - @Override - public List locateRegions(final byte[] tableName) - throws IOException { - return locateRegions(TableName.valueOf(tableName)); - } - - @Override - public List locateRegions(final TableName tableName, - final boolean useCache, final boolean offlined) throws IOException { - NavigableMap regions = MetaScanner.allTableRegions(this, tableName); - final List locations = new ArrayList(); - for (HRegionInfo regionInfo : regions.keySet()) { - RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true); - if (list != null) { - for (HRegionLocation loc : list.getRegionLocations()) { - if (loc != null) { - locations.add(loc); - } - } - } - } - return locations; - } - - @Override - public List locateRegions(final byte[] tableName, - final boolean useCache, final boolean offlined) throws IOException { - return locateRegions(TableName.valueOf(tableName), useCache, offlined); - } - - @Override - public HRegionLocation locateRegion( - final TableName tableName, final byte[] row) throws IOException{ - RegionLocations locations = locateRegion(tableName, row, true, true); - return locations == null ? null : locations.getRegionLocation(); - } - - @Override - public HRegionLocation locateRegion(final byte[] tableName, - final byte [] row) - throws IOException{ - return locateRegion(TableName.valueOf(tableName), row); - } - - @Override - public HRegionLocation relocateRegion(final TableName tableName, - final byte [] row) throws IOException{ - RegionLocations locations = relocateRegion(tableName, row, - RegionReplicaUtil.DEFAULT_REPLICA_ID); - return locations == null ? null : - locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID); - } - - @Override - public RegionLocations relocateRegion(final TableName tableName, - final byte [] row, int replicaId) throws IOException{ - // Since this is an explicit request not to use any caching, finding - // disabled tables should not be desirable. This will ensure that an exception is thrown when - // the first time a disabled table is interacted with. 
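Editor's note: the removed isTableAvailable walks hbase:meta and counts deployed regions before answering. From application code the same questions are usually asked through Admin; a short sketch, assuming a hypothetical table "mytable":

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TableStateCheck {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName name = TableName.valueOf("mytable"); // hypothetical table
          // Enabled is a pure state check; available additionally requires every
          // region of the table to be deployed on some region server.
          System.out.println(name + " enabled="   + admin.isTableEnabled(name));
          System.out.println(name + " available=" + admin.isTableAvailable(name));
        }
      }
    }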
- if (!tableName.equals(TableName.META_TABLE_NAME) && isTableDisabled(tableName)) { - throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled."); - } - - return locateRegion(tableName, row, false, true, replicaId); - } - - @Override - public HRegionLocation relocateRegion(final byte[] tableName, - final byte [] row) throws IOException { - return relocateRegion(TableName.valueOf(tableName), row); - } - - @Override - public RegionLocations locateRegion(final TableName tableName, - final byte [] row, boolean useCache, boolean retry) - throws IOException { - return locateRegion(tableName, row, useCache, retry, RegionReplicaUtil.DEFAULT_REPLICA_ID); - } - - @Override - public RegionLocations locateRegion(final TableName tableName, - final byte [] row, boolean useCache, boolean retry, int replicaId) - throws IOException { - if (this.closed) throw new IOException(toString() + " closed"); - if (tableName== null || tableName.getName().length == 0) { - throw new IllegalArgumentException( - "table name cannot be null or zero length"); - } - if (tableName.equals(TableName.META_TABLE_NAME)) { - return locateMeta(tableName, useCache, replicaId); - } else { - // Region not in the cache - have to go to the meta RS - return locateRegionInMeta(tableName, row, useCache, retry, replicaId); - } - } - - private RegionLocations locateMeta(final TableName tableName, - boolean useCache, int replicaId) throws IOException { - // HBASE-10785: We cache the location of the META itself, so that we are not overloading - // zookeeper with one request for every region lookup. We cache the META with empty row - // key in MetaCache. - byte[] metaCacheKey = HConstants.EMPTY_START_ROW; // use byte[0] as the row for meta - RegionLocations locations = null; - if (useCache) { - locations = getCachedLocation(tableName, metaCacheKey); - if (locations != null && locations.getRegionLocation(replicaId) != null) { - return locations; - } - } - - // only one thread should do the lookup. - synchronized (metaRegionLock) { - // Check the cache again for a hit in case some other thread made the - // same query while we were waiting on the lock. - if (useCache) { - locations = getCachedLocation(tableName, metaCacheKey); - if (locations != null && locations.getRegionLocation(replicaId) != null) { - return locations; - } - } - - // Look up from zookeeper - locations = this.registry.getMetaRegionLocation(); - if (locations != null) { - cacheLocation(tableName, locations); - } - } - return locations; - } - - /* - * Search the hbase:meta table for the HRegionLocation - * info that contains the table and row we're seeking. - */ - private RegionLocations locateRegionInMeta(TableName tableName, byte[] row, - boolean useCache, boolean retry, int replicaId) throws IOException { - - // If we are supposed to be using the cache, look in the cache to see if - // we already have the region. - if (useCache) { - RegionLocations locations = getCachedLocation(tableName, row); - if (locations != null && locations.getRegionLocation(replicaId) != null) { - return locations; - } - } - - // build the key of the meta region we should be looking for. - // the extra 9's on the end are necessary to allow "exact" matches - // without knowing the precise region names. 
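Editor's note: locateRegion and locateMeta above consult the client-side MetaCache first and only fall back to ZooKeeper or hbase:meta under a lock. The public way to ask the same question is RegionLocator; a hedged sketch with hypothetical table and row names:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionLocatorUsage {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             RegionLocator locator = connection.getRegionLocator(TableName.valueOf("mytable"))) { // hypothetical
          // Served from the client-side cache unless a reload is requested.
          HRegionLocation location = locator.getRegionLocation(Bytes.toBytes("row1"), false);
          System.out.println("row1 is on " + location.getServerName());
          List<HRegionLocation> all = locator.getAllRegionLocations();
          System.out.println(all.size() + " region locations");
        }
      }
    }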
- byte[] metaKey = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false); - - Scan s = new Scan(); - s.setReversed(true); - s.setStartRow(metaKey); - s.setSmall(true); - s.setCaching(1); - if (this.useMetaReplicas) { - s.setConsistency(Consistency.TIMELINE); - } - - int localNumRetries = (retry ? numTries : 1); - - for (int tries = 0; true; tries++) { - if (tries >= localNumRetries) { - throw new NoServerForRegionException("Unable to find region for " - + Bytes.toStringBinary(row) + " in " + tableName + - " after " + localNumRetries + " tries."); - } - if (useCache) { - RegionLocations locations = getCachedLocation(tableName, row); - if (locations != null && locations.getRegionLocation(replicaId) != null) { - return locations; - } - } else { - // If we are not supposed to be using the cache, delete any existing cached location - // so it won't interfere. - metaCache.clearCache(tableName, row); - } - - // Query the meta region - try { - Result regionInfoRow = null; - ReversedClientScanner rcs = null; - try { - rcs = new ClientSmallReversedScanner(conf, s, TableName.META_TABLE_NAME, this, - rpcCallerFactory, rpcControllerFactory, getBatchPool(), 0); - regionInfoRow = rcs.next(); - } finally { - if (rcs != null) { - rcs.close(); - } - } - - if (regionInfoRow == null) { - throw new TableNotFoundException(tableName); - } - - // convert the row result into the HRegionLocation we need! - RegionLocations locations = MetaTableAccessor.getRegionLocations(regionInfoRow); - if (locations == null || locations.getRegionLocation(replicaId) == null) { - throw new IOException("HRegionInfo was null in " + - tableName + ", row=" + regionInfoRow); - } - HRegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegionInfo(); - if (regionInfo == null) { - throw new IOException("HRegionInfo was null or empty in " + - TableName.META_TABLE_NAME + ", row=" + regionInfoRow); - } - - // possible we got a region of a different table... - if (!regionInfo.getTable().equals(tableName)) { - throw new TableNotFoundException( - "Table '" + tableName + "' was not found, got: " + - regionInfo.getTable() + "."); - } - if (regionInfo.isSplit()) { - throw new RegionOfflineException("the only available region for" + - " the required row is a split parent," + - " the daughters should be online soon: " + - regionInfo.getRegionNameAsString()); - } - if (regionInfo.isOffline()) { - throw new RegionOfflineException("the region is offline, could" + - " be caused by a disable table call: " + - regionInfo.getRegionNameAsString()); - } - - ServerName serverName = locations.getRegionLocation(replicaId).getServerName(); - if (serverName == null) { - throw new NoServerForRegionException("No server address listed " + - "in " + TableName.META_TABLE_NAME + " for region " + - regionInfo.getRegionNameAsString() + " containing row " + - Bytes.toStringBinary(row)); - } - - if (isDeadServer(serverName)){ - throw new RegionServerStoppedException("hbase:meta says the region "+ - regionInfo.getRegionNameAsString()+" is managed by the server " + serverName + - ", but it is dead."); - } - // Instantiate the location - cacheLocation(tableName, locations); - return locations; - } catch (TableNotFoundException e) { - // if we got this error, probably means the table just plain doesn't - // exist. rethrow the error immediately. this should always be coming - // from the HTable constructor. 
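Editor's note: the lookup loop that follows retries the meta query and sleeps a pause that grows with the attempt number (ConnectionUtils.getPauseTime). A standalone sketch of that retry-with-backoff shape; the multiplier table and limits are illustrative, not a claim about the exact client values:

    import java.io.IOException;
    import java.io.InterruptedIOException;
    import java.util.concurrent.Callable;

    public class RetryWithBackoff {
      // Per-attempt multipliers in the spirit of the client's backoff table; illustrative values.
      private static final int[] BACKOFF = {1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200};

      static long pauseTime(long basePause, int tries) {
        return basePause * BACKOFF[Math.min(tries, BACKOFF.length - 1)];
      }

      static <T> T callWithRetries(Callable<T> call, int maxTries, long basePause) throws IOException {
        for (int tries = 0; ; tries++) {
          try {
            return call.call();
          } catch (Exception e) {
            if (tries >= maxTries - 1) {
              throw new IOException("Failed after " + maxTries + " tries", e);
            }
            try {
              Thread.sleep(pauseTime(basePause, tries)); // back off before the next attempt
            } catch (InterruptedException ie) {
              Thread.currentThread().interrupt();
              throw new InterruptedIOException("Interrupted while backing off");
            }
          }
        }
      }
    }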
- throw e; - } catch (IOException e) { - ExceptionUtil.rethrowIfInterrupt(e); - - if (e instanceof RemoteException) { - e = ((RemoteException)e).unwrapRemoteException(); - } - if (tries < localNumRetries - 1) { - if (LOG.isDebugEnabled()) { - LOG.debug("locateRegionInMeta parentTable=" + - TableName.META_TABLE_NAME + ", metaLocation=" + - ", attempt=" + tries + " of " + - localNumRetries + " failed; retrying after sleep of " + - ConnectionUtils.getPauseTime(this.pause, tries) + " because: " + e.getMessage()); - } - } else { - throw e; - } - // Only relocate the parent region if necessary - if(!(e instanceof RegionOfflineException || - e instanceof NoServerForRegionException)) { - relocateRegion(TableName.META_TABLE_NAME, metaKey, replicaId); - } - } - try{ - Thread.sleep(ConnectionUtils.getPauseTime(this.pause, tries)); - } catch (InterruptedException e) { - throw new InterruptedIOException("Giving up trying to location region in " + - "meta: thread is interrupted."); - } - } - } - - /** - * Put a newly discovered HRegionLocation into the cache. - * @param tableName The table name. - * @param location the new location - */ - private void cacheLocation(final TableName tableName, final RegionLocations location) { - metaCache.cacheLocation(tableName, location); - } - - /** - * Search the cache for a location that fits our table and row key. - * Return null if no suitable region is located. - * - * @param tableName - * @param row - * @return Null or region location found in cache. - */ - RegionLocations getCachedLocation(final TableName tableName, - final byte [] row) { - return metaCache.getCachedLocation(tableName, row); - } - - public void clearRegionCache(final TableName tableName, byte[] row) { - metaCache.clearCache(tableName, row); - } - - /* - * Delete all cached entries of a table that maps to a specific location. - */ - @Override - public void clearCaches(final ServerName serverName) { - metaCache.clearCache(serverName); - } - - @Override - public void clearRegionCache() { - metaCache.clearCache(); - } - - @Override - public void clearRegionCache(final TableName tableName) { - metaCache.clearCache(tableName); - } - - @Override - public void clearRegionCache(final byte[] tableName) { - clearRegionCache(TableName.valueOf(tableName)); - } - - /** - * Put a newly discovered HRegionLocation into the cache. - * @param tableName The table name. - * @param source the source of the new location, if it's not coming from meta - * @param location the new location - */ - private void cacheLocation(final TableName tableName, final ServerName source, - final HRegionLocation location) { - metaCache.cacheLocation(tableName, source, location); - } - - // Map keyed by service name + regionserver to service stub implementation - private final ConcurrentHashMap stubs = - new ConcurrentHashMap(); - // Map of locks used creating service stubs per regionserver. - private final ConcurrentHashMap connectionLock = - new ConcurrentHashMap(); - - /** - * State of the MasterService connection/setup. 
- */ - static class MasterServiceState { - HConnection connection; - MasterService.BlockingInterface stub; - int userCount; - - MasterServiceState(final HConnection connection) { - super(); - this.connection = connection; - } - - @Override - public String toString() { - return "MasterService"; - } - - Object getStub() { - return this.stub; - } - - void clearStub() { - this.stub = null; - } - - boolean isMasterRunning() throws ServiceException { - IsMasterRunningResponse response = - this.stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest()); - return response != null? response.getIsMasterRunning(): false; - } - } - - /** - * Makes a client-side stub for master services. Sub-class to specialize. - * Depends on hosting class so not static. Exists so we avoid duplicating a bunch of code - * when setting up the MasterMonitorService and MasterAdminService. - */ - abstract class StubMaker { - /** - * Returns the name of the service stub being created. - */ - protected abstract String getServiceName(); - - /** - * Make stub and cache it internal so can be used later doing the isMasterRunning call. - * @param channel - */ - protected abstract Object makeStub(final BlockingRpcChannel channel); - - /** - * Once setup, check it works by doing isMasterRunning check. - * @throws ServiceException - */ - protected abstract void isMasterRunning() throws ServiceException; - - /** - * Create a stub. Try once only. It is not typed because there is no common type to - * protobuf services nor their interfaces. Let the caller do appropriate casting. - * @return A stub for master services. - * @throws IOException - * @throws KeeperException - * @throws ServiceException - */ - private Object makeStubNoRetries() throws IOException, KeeperException, ServiceException { - ZooKeeperKeepAliveConnection zkw; - try { - zkw = getKeepAliveZooKeeperWatcher(); - } catch (IOException e) { - ExceptionUtil.rethrowIfInterrupt(e); - throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e); - } - try { - checkIfBaseNodeAvailable(zkw); - ServerName sn = MasterAddressTracker.getMasterAddress(zkw); - if (sn == null) { - String msg = "ZooKeeper available but no active master location found"; - LOG.info(msg); - throw new MasterNotRunningException(msg); - } - if (isDeadServer(sn)) { - throw new MasterNotRunningException(sn + " is dead."); - } - // Use the security info interface name as our stub key - String key = getStubKey(getServiceName(), sn.getHostAndPort()); - connectionLock.putIfAbsent(key, key); - Object stub = null; - synchronized (connectionLock.get(key)) { - stub = stubs.get(key); - if (stub == null) { - BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn, user, rpcTimeout); - stub = makeStub(channel); - isMasterRunning(); - stubs.put(key, stub); - } - } - return stub; - } finally { - zkw.close(); - } - } - - /** - * Create a stub against the master. Retry if necessary. 
- * @return A stub to do intf against the master - * @throws MasterNotRunningException - */ - Object makeStub() throws IOException { - // The lock must be at the beginning to prevent multiple master creations - // (and leaks) in a multithread context - synchronized (masterAndZKLock) { - Exception exceptionCaught = null; - if (!closed) { - try { - return makeStubNoRetries(); - } catch (IOException e) { - exceptionCaught = e; - } catch (KeeperException e) { - exceptionCaught = e; - } catch (ServiceException e) { - exceptionCaught = e; - } - - throw new MasterNotRunningException(exceptionCaught); - } else { - throw new DoNotRetryIOException("Connection was closed while trying to get master"); - } - } - } - } - - /** - * Class to make a MasterServiceStubMaker stub. - */ - class MasterServiceStubMaker extends StubMaker { - private MasterService.BlockingInterface stub; - @Override - protected String getServiceName() { - return MasterService.getDescriptor().getName(); - } - - @Override - MasterService.BlockingInterface makeStub() throws IOException { - return (MasterService.BlockingInterface)super.makeStub(); - } - - @Override - protected Object makeStub(BlockingRpcChannel channel) { - this.stub = MasterService.newBlockingStub(channel); - return this.stub; - } - - @Override - protected void isMasterRunning() throws ServiceException { - this.stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest()); - } - } - - @Override - public AdminService.BlockingInterface getAdmin(final ServerName serverName) - throws IOException { - return getAdmin(serverName, false); - } - - @Override - // Nothing is done w/ the 'master' parameter. It is ignored. - public AdminService.BlockingInterface getAdmin(final ServerName serverName, - final boolean master) - throws IOException { - if (isDeadServer(serverName)) { - throw new RegionServerStoppedException(serverName + " is dead."); - } - String key = getStubKey(AdminService.BlockingInterface.class.getName(), - serverName.getHostAndPort()); - this.connectionLock.putIfAbsent(key, key); - AdminService.BlockingInterface stub = null; - synchronized (this.connectionLock.get(key)) { - stub = (AdminService.BlockingInterface)this.stubs.get(key); - if (stub == null) { - BlockingRpcChannel channel = - this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout); - stub = AdminService.newBlockingStub(channel); - this.stubs.put(key, stub); - } - } - return stub; - } - - @Override - public ClientService.BlockingInterface getClient(final ServerName sn) - throws IOException { - if (isDeadServer(sn)) { - throw new RegionServerStoppedException(sn + " is dead."); - } - String key = getStubKey(ClientService.BlockingInterface.class.getName(), sn.getHostAndPort()); - this.connectionLock.putIfAbsent(key, key); - ClientService.BlockingInterface stub = null; - synchronized (this.connectionLock.get(key)) { - stub = (ClientService.BlockingInterface)this.stubs.get(key); - if (stub == null) { - BlockingRpcChannel channel = - this.rpcClient.createBlockingRpcChannel(sn, user, rpcTimeout); - stub = ClientService.newBlockingStub(channel); - // In old days, after getting stub/proxy, we'd make a call. We are not doing that here. - // Just fail on first actual call rather than in here on setup. 
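Editor's note: getAdmin and getClient above cache one RPC stub per "service@host:port" key and serialize stub creation for a key by parking a lock object in a ConcurrentHashMap with putIfAbsent. A generic sketch of that pattern; the stub type and factory are placeholders, not HBase classes:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.function.Function;

    public class StubCache<S> {
      private final ConcurrentMap<String, S> stubs = new ConcurrentHashMap<>();
      // One lock object per key: creating a stub for a given server is serialized,
      // while different servers do not contend with each other.
      private final ConcurrentMap<String, String> keyLocks = new ConcurrentHashMap<>();

      static String stubKey(String serviceName, String hostAndPort) {
        return serviceName + "@" + hostAndPort;
      }

      public S get(String serviceName, String hostAndPort, Function<String, S> factory) {
        String key = stubKey(serviceName, hostAndPort);
        keyLocks.putIfAbsent(key, key);
        synchronized (keyLocks.get(key)) {
          S stub = stubs.get(key);
          if (stub == null) {
            stub = factory.apply(hostAndPort); // e.g. wrap a blocking RPC channel to the server
            stubs.put(key, stub);
          }
          return stub;
        }
      }
    }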
- this.stubs.put(key, stub); - } - } - return stub; - } - - static String getStubKey(final String serviceName, final String rsHostnamePort) { - return serviceName + "@" + rsHostnamePort; - } - - private ZooKeeperKeepAliveConnection keepAliveZookeeper; - private AtomicInteger keepAliveZookeeperUserCount = new AtomicInteger(0); - private boolean canCloseZKW = true; - - // keepAlive time, in ms. No reason to make it configurable. - private static final long keepAlive = 5 * 60 * 1000; - - /** - * Retrieve a shared ZooKeeperWatcher. You must close it it once you've have finished with it. - * @return The shared instance. Never returns null. - */ - ZooKeeperKeepAliveConnection getKeepAliveZooKeeperWatcher() - throws IOException { - synchronized (masterAndZKLock) { - if (keepAliveZookeeper == null) { - if (this.closed) { - throw new IOException(toString() + " closed"); - } - // We don't check that our link to ZooKeeper is still valid - // But there is a retry mechanism in the ZooKeeperWatcher itself - keepAliveZookeeper = new ZooKeeperKeepAliveConnection(conf, this.toString(), this); - } - keepAliveZookeeperUserCount.addAndGet(1); - keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE; - return keepAliveZookeeper; - } - } - - void releaseZooKeeperWatcher(final ZooKeeperWatcher zkw) { - if (zkw == null){ - return; - } - if (keepAliveZookeeperUserCount.addAndGet(-1) <= 0) { - keepZooKeeperWatcherAliveUntil = System.currentTimeMillis() + keepAlive; - } - } - - private void closeZooKeeperWatcher() { - synchronized (masterAndZKLock) { - if (keepAliveZookeeper != null) { - LOG.info("Closing zookeeper sessionid=0x" + - Long.toHexString( - keepAliveZookeeper.getRecoverableZooKeeper().getSessionId())); - keepAliveZookeeper.internalClose(); - keepAliveZookeeper = null; - } - keepAliveZookeeperUserCount.set(0); - } - } - - final MasterServiceState masterServiceState = new MasterServiceState(this); - - @Override - public MasterService.BlockingInterface getMaster() throws MasterNotRunningException { - return getKeepAliveMasterService(); - } - - private void resetMasterServiceState(final MasterServiceState mss) { - mss.userCount++; - } - - @Override - public MasterKeepAliveConnection getKeepAliveMasterService() - throws MasterNotRunningException { - synchronized (masterAndZKLock) { - if (!isKeepAliveMasterConnectedAndRunning(this.masterServiceState)) { - MasterServiceStubMaker stubMaker = new MasterServiceStubMaker(); - try { - this.masterServiceState.stub = stubMaker.makeStub(); - } catch (MasterNotRunningException ex) { - throw ex; - } catch (IOException e) { - // rethrow as MasterNotRunningException so that we can keep the method sig - throw new MasterNotRunningException(e); - } - } - resetMasterServiceState(this.masterServiceState); - } - // Ugly delegation just so we can add in a Close method. 
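Editor's note: the shared ZooKeeper watcher above is handed out with a user count and, once the count drops to zero, is only kept alive until a deadline rather than closed immediately. A stripped-down sketch of that bookkeeping with a generic resource type; names and the grace period are illustrative:

    import java.util.concurrent.atomic.AtomicInteger;

    public class KeepAliveHandle<R> {
      private static final long KEEP_ALIVE_MS = 5 * 60 * 1000; // grace period, same spirit as above

      private final R resource;
      private final AtomicInteger userCount = new AtomicInteger(0);
      private volatile long keepAliveUntil = Long.MAX_VALUE;

      public KeepAliveHandle(R resource) {
        this.resource = resource;
      }

      public R acquire() {
        userCount.incrementAndGet();
        keepAliveUntil = Long.MAX_VALUE; // in use: never expires while there are users
        return resource;
      }

      public void release() {
        if (userCount.decrementAndGet() <= 0) {
          // Last user gone: keep the resource warm for a while before a janitor closes it.
          keepAliveUntil = System.currentTimeMillis() + KEEP_ALIVE_MS;
        }
      }

      public boolean expired() {
        return userCount.get() <= 0 && System.currentTimeMillis() > keepAliveUntil;
      }
    }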
- final MasterService.BlockingInterface stub = this.masterServiceState.stub; - return new MasterKeepAliveConnection() { - MasterServiceState mss = masterServiceState; - @Override - public AddColumnResponse addColumn(RpcController controller, AddColumnRequest request) - throws ServiceException { - return stub.addColumn(controller, request); - } - - @Override - public DeleteColumnResponse deleteColumn(RpcController controller, - DeleteColumnRequest request) - throws ServiceException { - return stub.deleteColumn(controller, request); - } - - @Override - public ModifyColumnResponse modifyColumn(RpcController controller, - ModifyColumnRequest request) - throws ServiceException { - return stub.modifyColumn(controller, request); - } - - @Override - public MoveRegionResponse moveRegion(RpcController controller, - MoveRegionRequest request) throws ServiceException { - return stub.moveRegion(controller, request); - } - - @Override - public DispatchMergingRegionsResponse dispatchMergingRegions( - RpcController controller, DispatchMergingRegionsRequest request) - throws ServiceException { - return stub.dispatchMergingRegions(controller, request); - } - - @Override - public AssignRegionResponse assignRegion(RpcController controller, - AssignRegionRequest request) throws ServiceException { - return stub.assignRegion(controller, request); - } - - @Override - public UnassignRegionResponse unassignRegion(RpcController controller, - UnassignRegionRequest request) throws ServiceException { - return stub.unassignRegion(controller, request); - } - - @Override - public OfflineRegionResponse offlineRegion(RpcController controller, - OfflineRegionRequest request) throws ServiceException { - return stub.offlineRegion(controller, request); - } - - @Override - public DeleteTableResponse deleteTable(RpcController controller, - DeleteTableRequest request) throws ServiceException { - return stub.deleteTable(controller, request); - } - - @Override - public TruncateTableResponse truncateTable(RpcController controller, - TruncateTableRequest request) throws ServiceException { - return stub.truncateTable(controller, request); - } - - @Override - public EnableTableResponse enableTable(RpcController controller, - EnableTableRequest request) throws ServiceException { - return stub.enableTable(controller, request); - } - - @Override - public DisableTableResponse disableTable(RpcController controller, - DisableTableRequest request) throws ServiceException { - return stub.disableTable(controller, request); - } - - @Override - public ModifyTableResponse modifyTable(RpcController controller, - ModifyTableRequest request) throws ServiceException { - return stub.modifyTable(controller, request); - } - - @Override - public CreateTableResponse createTable(RpcController controller, - CreateTableRequest request) throws ServiceException { - return stub.createTable(controller, request); - } - - @Override - public ShutdownResponse shutdown(RpcController controller, - ShutdownRequest request) throws ServiceException { - return stub.shutdown(controller, request); - } - - @Override - public StopMasterResponse stopMaster(RpcController controller, - StopMasterRequest request) throws ServiceException { - return stub.stopMaster(controller, request); - } - - @Override - public BalanceResponse balance(RpcController controller, - BalanceRequest request) throws ServiceException { - return stub.balance(controller, request); - } - - @Override - public SetBalancerRunningResponse setBalancerRunning( - RpcController controller, SetBalancerRunningRequest 
request) - throws ServiceException { - return stub.setBalancerRunning(controller, request); - } - - @Override - public RunCatalogScanResponse runCatalogScan(RpcController controller, - RunCatalogScanRequest request) throws ServiceException { - return stub.runCatalogScan(controller, request); - } - - @Override - public EnableCatalogJanitorResponse enableCatalogJanitor( - RpcController controller, EnableCatalogJanitorRequest request) - throws ServiceException { - return stub.enableCatalogJanitor(controller, request); - } - - @Override - public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled( - RpcController controller, IsCatalogJanitorEnabledRequest request) - throws ServiceException { - return stub.isCatalogJanitorEnabled(controller, request); - } - - @Override - public CoprocessorServiceResponse execMasterService( - RpcController controller, CoprocessorServiceRequest request) - throws ServiceException { - return stub.execMasterService(controller, request); - } - - @Override - public SnapshotResponse snapshot(RpcController controller, - SnapshotRequest request) throws ServiceException { - return stub.snapshot(controller, request); - } - - @Override - public GetCompletedSnapshotsResponse getCompletedSnapshots( - RpcController controller, GetCompletedSnapshotsRequest request) - throws ServiceException { - return stub.getCompletedSnapshots(controller, request); - } - - @Override - public DeleteSnapshotResponse deleteSnapshot(RpcController controller, - DeleteSnapshotRequest request) throws ServiceException { - return stub.deleteSnapshot(controller, request); - } - - @Override - public IsSnapshotDoneResponse isSnapshotDone(RpcController controller, - IsSnapshotDoneRequest request) throws ServiceException { - return stub.isSnapshotDone(controller, request); - } - - @Override - public RestoreSnapshotResponse restoreSnapshot( - RpcController controller, RestoreSnapshotRequest request) - throws ServiceException { - return stub.restoreSnapshot(controller, request); - } - - @Override - public IsRestoreSnapshotDoneResponse isRestoreSnapshotDone( - RpcController controller, IsRestoreSnapshotDoneRequest request) - throws ServiceException { - return stub.isRestoreSnapshotDone(controller, request); - } - - @Override - public ExecProcedureResponse execProcedure( - RpcController controller, ExecProcedureRequest request) - throws ServiceException { - return stub.execProcedure(controller, request); - } - - @Override - public ExecProcedureResponse execProcedureWithRet( - RpcController controller, ExecProcedureRequest request) - throws ServiceException { - return stub.execProcedureWithRet(controller, request); - } - - @Override - public IsProcedureDoneResponse isProcedureDone(RpcController controller, - IsProcedureDoneRequest request) throws ServiceException { - return stub.isProcedureDone(controller, request); - } - - @Override - public IsMasterRunningResponse isMasterRunning( - RpcController controller, IsMasterRunningRequest request) - throws ServiceException { - return stub.isMasterRunning(controller, request); - } - - @Override - public ModifyNamespaceResponse modifyNamespace(RpcController controller, - ModifyNamespaceRequest request) - throws ServiceException { - return stub.modifyNamespace(controller, request); - } - - @Override - public CreateNamespaceResponse createNamespace( - RpcController controller, CreateNamespaceRequest request) throws ServiceException { - return stub.createNamespace(controller, request); - } - - @Override - public DeleteNamespaceResponse deleteNamespace( - RpcController 
controller, DeleteNamespaceRequest request) throws ServiceException { - return stub.deleteNamespace(controller, request); - } - - @Override - public GetNamespaceDescriptorResponse getNamespaceDescriptor(RpcController controller, - GetNamespaceDescriptorRequest request) throws ServiceException { - return stub.getNamespaceDescriptor(controller, request); - } - - @Override - public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController controller, - ListNamespaceDescriptorsRequest request) throws ServiceException { - return stub.listNamespaceDescriptors(controller, request); - } - - @Override - public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace( - RpcController controller, ListTableDescriptorsByNamespaceRequest request) - throws ServiceException { - return stub.listTableDescriptorsByNamespace(controller, request); - } - - @Override - public ListTableNamesByNamespaceResponse listTableNamesByNamespace( - RpcController controller, ListTableNamesByNamespaceRequest request) - throws ServiceException { - return stub.listTableNamesByNamespace(controller, request); - } - - @Override - public GetTableStateResponse getTableState( - RpcController controller, GetTableStateRequest request) - throws ServiceException { - return stub.getTableState(controller, request); - } - - @Override - public void close() { - release(this.mss); - } - - @Override - public GetSchemaAlterStatusResponse getSchemaAlterStatus( - RpcController controller, GetSchemaAlterStatusRequest request) - throws ServiceException { - return stub.getSchemaAlterStatus(controller, request); - } - - @Override - public GetTableDescriptorsResponse getTableDescriptors( - RpcController controller, GetTableDescriptorsRequest request) - throws ServiceException { - return stub.getTableDescriptors(controller, request); - } - - @Override - public GetTableNamesResponse getTableNames( - RpcController controller, GetTableNamesRequest request) - throws ServiceException { - return stub.getTableNames(controller, request); - } - - @Override - public GetClusterStatusResponse getClusterStatus( - RpcController controller, GetClusterStatusRequest request) - throws ServiceException { - return stub.getClusterStatus(controller, request); - } - - @Override - public SetQuotaResponse setQuota( - RpcController controller, SetQuotaRequest request) - throws ServiceException { - return stub.setQuota(controller, request); - } - - @Override - public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp( - RpcController controller, MajorCompactionTimestampRequest request) - throws ServiceException { - return stub.getLastMajorCompactionTimestamp(controller, request); - } - - @Override - public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion( - RpcController controller, MajorCompactionTimestampForRegionRequest request) - throws ServiceException { - return stub.getLastMajorCompactionTimestampForRegion(controller, request); - } - }; - } - - - private static void release(MasterServiceState mss) { - if (mss != null && mss.connection != null) { - ((HConnectionImplementation)mss.connection).releaseMaster(mss); - } - } - - private boolean isKeepAliveMasterConnectedAndRunning(MasterServiceState mss) { - if (mss.getStub() == null){ - return false; - } - try { - return mss.isMasterRunning(); - } catch (UndeclaredThrowableException e) { - // It's somehow messy, but we can receive exceptions such as - // java.net.ConnectException but they're not declared. So we catch it... 
- LOG.info("Master connection is not running anymore", e.getUndeclaredThrowable()); - return false; - } catch (ServiceException se) { - LOG.warn("Checking master connection", se); - return false; - } - } - - void releaseMaster(MasterServiceState mss) { - if (mss.getStub() == null) return; - synchronized (masterAndZKLock) { - --mss.userCount; - } - } - - private void closeMasterService(MasterServiceState mss) { - if (mss.getStub() != null) { - LOG.info("Closing master protocol: " + mss); - mss.clearStub(); - } - mss.userCount = 0; - } - - /** - * Immediate close of the shared master. Can be by the delayed close or when closing the - * connection itself. - */ - private void closeMaster() { - synchronized (masterAndZKLock) { - closeMasterService(masterServiceState); - } - } - - void updateCachedLocation(HRegionInfo hri, ServerName source, - ServerName serverName, long seqNum) { - HRegionLocation newHrl = new HRegionLocation(hri, serverName, seqNum); - cacheLocation(hri.getTable(), source, newHrl); - } - - @Override - public void deleteCachedRegionLocation(final HRegionLocation location) { - metaCache.clearCache(location); - } - - @Override - public void updateCachedLocations(final TableName tableName, byte[] rowkey, - final Object exception, final HRegionLocation source) { - assert source != null; - updateCachedLocations(tableName, source.getRegionInfo().getRegionName() - , rowkey, exception, source.getServerName()); - } - - /** - * Update the location with the new value (if the exception is a RegionMovedException) - * or delete it from the cache. Does nothing if we can be sure from the exception that - * the location is still accurate, or if the cache has already been updated. - * @param exception an object (to simplify user code) on which we will try to find a nested - * or wrapped or both RegionMovedException - * @param source server that is the source of the location update. - */ - @Override - public void updateCachedLocations(final TableName tableName, byte[] regionName, byte[] rowkey, - final Object exception, final ServerName source) { - if (rowkey == null || tableName == null) { - LOG.warn("Coding error, see method javadoc. row=" + (rowkey == null ? "null" : rowkey) + - ", tableName=" + (tableName == null ? "null" : tableName)); - return; - } - - if (source == null) { - // This should not happen, but let's secure ourselves. - return; - } - - if (regionName == null) { - // we do not know which region, so just remove the cache entry for the row and server - metaCache.clearCache(tableName, rowkey, source); - return; - } - - // Is it something we have already updated? - final RegionLocations oldLocations = getCachedLocation(tableName, rowkey); - HRegionLocation oldLocation = null; - if (oldLocations != null) { - oldLocation = oldLocations.getRegionLocationByRegionName(regionName); - } - if (oldLocation == null || !source.equals(oldLocation.getServerName())) { - // There is no such location in the cache (it's been removed already) or - // the cache has already been refreshed with a different location. 
=> nothing to do - return; - } - - HRegionInfo regionInfo = oldLocation.getRegionInfo(); - Throwable cause = findException(exception); - if (cause != null) { - if (cause instanceof RegionTooBusyException || cause instanceof RegionOpeningException) { - // We know that the region is still on this region server - return; - } - - if (cause instanceof RegionMovedException) { - RegionMovedException rme = (RegionMovedException) cause; - if (LOG.isTraceEnabled()) { - LOG.trace("Region " + regionInfo.getRegionNameAsString() + " moved to " + - rme.getHostname() + ":" + rme.getPort() + - " according to " + source.getHostAndPort()); - } - // We know that the region is not anymore on this region server, but we know - // the new location. - updateCachedLocation( - regionInfo, source, rme.getServerName(), rme.getLocationSeqNum()); - return; - } - } - - // If we're here, it means that can cannot be sure about the location, so we remove it from - // the cache. Do not send the source because source can be a new server in the same host:port - metaCache.clearCache(regionInfo); - } - - @Override - public void updateCachedLocations(final byte[] tableName, byte[] rowkey, - final Object exception, final HRegionLocation source) { - updateCachedLocations(TableName.valueOf(tableName), rowkey, exception, source); - } - - /** - * @deprecated since 0.96 - Use {@link HTableInterface#batch} instead - */ - @Override - @Deprecated - public void processBatch(List list, - final TableName tableName, - ExecutorService pool, - Object[] results) throws IOException, InterruptedException { - // This belongs in HTable!!! Not in here. St.Ack - - // results must be the same size as list - if (results.length != list.size()) { - throw new IllegalArgumentException( - "argument results must be the same size as argument list"); - } - processBatchCallback(list, tableName, pool, results, null); - } - - /** - * @deprecated Unsupported API - */ - @Override - @Deprecated - public void processBatch(List list, - final byte[] tableName, - ExecutorService pool, - Object[] results) throws IOException, InterruptedException { - processBatch(list, TableName.valueOf(tableName), pool, results); - } - - /** - * Send the queries in parallel on the different region servers. Retries on failures. - * If the method returns it means that there is no error, and the 'results' array will - * contain no exception. On error, an exception is thrown, and the 'results' array will - * contain results and exceptions. - * @deprecated since 0.96 - Use {@link HTable#processBatchCallback} instead - */ - @Override - @Deprecated - public void processBatchCallback( - List list, - TableName tableName, - ExecutorService pool, - Object[] results, - Batch.Callback callback) - throws IOException, InterruptedException { - - AsyncRequestFuture ars = this.asyncProcess.submitAll( - pool, tableName, list, callback, results); - ars.waitUntilDone(); - if (ars.hasError()) { - throw ars.getErrors(); - } - } - - /** - * @deprecated Unsupported API - */ - @Override - @Deprecated - public void processBatchCallback( - List list, - byte[] tableName, - ExecutorService pool, - Object[] results, - Batch.Callback callback) - throws IOException, InterruptedException { - processBatchCallback(list, TableName.valueOf(tableName), pool, results, callback); - } - - // For tests to override. - protected AsyncProcess createAsyncProcess(Configuration conf) { - // No default pool available. 
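Editor's note: processBatch and processBatchCallback are deprecated above in favour of the batch call on the table itself. A hedged sketch of that replacement, assuming a hypothetical table, rows and column family:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Row;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableBatchUsage {
      public static void main(String[] args) throws IOException, InterruptedException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("mytable"))) { // hypothetical table
          List<Row> actions = new ArrayList<>();
          Put put = new Put(Bytes.toBytes("row1")); // hypothetical rows/family/qualifier
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          actions.add(put);
          actions.add(new Get(Bytes.toBytes("row2")));
          // results has the same length as actions; each slot holds the per-action
          // result or the exception that failed it.
          Object[] results = new Object[actions.size()];
          table.batch(actions, results);
        }
      }
    }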
- return new AsyncProcess(this, conf, this.batchPool, - RpcRetryingCallerFactory.instantiate(conf, this.getStatisticsTracker()), false, - RpcControllerFactory.instantiate(conf)); - } - - @Override - public AsyncProcess getAsyncProcess() { - return asyncProcess; - } - - @Override - public ServerStatisticTracker getStatisticsTracker() { - return this.stats; - } - - @Override - public ClientBackoffPolicy getBackoffPolicy() { - return this.backoffPolicy; - } - - /* - * Return the number of cached region for a table. It will only be called - * from a unit test. - */ - @VisibleForTesting - int getNumberOfCachedRegionLocations(final TableName tableName) { - return metaCache.getNumberOfCachedRegionLocations(tableName); - } - - /** - * @deprecated always return false since 0.99 - */ - @Override - @Deprecated - public void setRegionCachePrefetch(final TableName tableName, final boolean enable) { - } - - /** - * @deprecated always return false since 0.99 - */ - @Override - @Deprecated - public void setRegionCachePrefetch(final byte[] tableName, - final boolean enable) { - } - - /** - * @deprecated always return false since 0.99 - */ - @Override - @Deprecated - public boolean getRegionCachePrefetch(TableName tableName) { - return false; - } - - /** - * @deprecated always return false since 0.99 - */ - @Override - @Deprecated - public boolean getRegionCachePrefetch(byte[] tableName) { - return false; - } - - @Override - public void abort(final String msg, Throwable t) { - if (t instanceof KeeperException.SessionExpiredException - && keepAliveZookeeper != null) { - synchronized (masterAndZKLock) { - if (keepAliveZookeeper != null) { - LOG.warn("This client just lost it's session with ZooKeeper," + - " closing it." + - " It will be recreated next time someone needs it", t); - closeZooKeeperWatcher(); - } - } - } else { - if (t != null) { - LOG.fatal(msg, t); - } else { - LOG.fatal(msg); - } - this.aborted = true; - close(); - this.closed = true; - } - } - - @Override - public boolean isClosed() { - return this.closed; - } - - @Override - public boolean isAborted(){ - return this.aborted; - } - - @Override - public int getCurrentNrHRS() throws IOException { - return this.registry.getCurrentNrHRS(); - } - - /** - * Increment this client's reference count. - */ - void incCount() { - ++refCount; - } - - /** - * Decrement this client's reference count. - */ - void decCount() { - if (refCount > 0) { - --refCount; - } - } - - /** - * Return if this client has no reference - * - * @return true if this client has no reference; false otherwise - */ - boolean isZeroReference() { - return refCount == 0; - } - - void internalClose() { - if (this.closed) { - return; - } - closeMaster(); - shutdownBatchPool(); - this.closed = true; - closeZooKeeperWatcher(); - this.stubs.clear(); - if (clusterStatusListener != null) { - clusterStatusListener.close(); - } - if (rpcClient != null) { - rpcClient.close(); - } - } - - @Override - public void close() { - if (managed) { - if (aborted) { - ConnectionManager.deleteStaleConnection(this); - } else { - ConnectionManager.deleteConnection(this, false); - } - } else { - internalClose(); - } - } - - /** - * Close the connection for good, regardless of what the current value of - * {@link #refCount} is. Ideally, {@link #refCount} should be zero at this - * point, which would be the case if all of its consumers close the - * connection. 
However, on the off chance that someone is unable to close - * the connection, perhaps because it bailed out prematurely, the method - * below will ensure that this {@link HConnection} instance is cleaned up. - * Caveat: The JVM may take an unknown amount of time to call finalize on an - * unreachable object, so our hope is that every consumer cleans up after - * itself, like any good citizen. - */ - @Override - protected void finalize() throws Throwable { - super.finalize(); - // Pretend as if we are about to release the last remaining reference - refCount = 1; - close(); - } - - /** - * @deprecated Use {@link Admin#listTables()} instead - */ - @Deprecated - @Override - public HTableDescriptor[] listTables() throws IOException { - MasterKeepAliveConnection master = getKeepAliveMasterService(); - try { - GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest((List)null); - return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req)); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } finally { - master.close(); - } - } - - /** - * @deprecated Use {@link Admin#listTableNames()} instead - */ - @Deprecated - @Override - public String[] getTableNames() throws IOException { - TableName[] tableNames = listTableNames(); - String[] result = new String[tableNames.length]; - for (int i = 0; i < tableNames.length; i++) { - result[i] = tableNames[i].getNameAsString(); - } - return result; - } - - /** - * @deprecated Use {@link Admin#listTableNames()} instead - */ - @Deprecated - @Override - public TableName[] listTableNames() throws IOException { - MasterKeepAliveConnection master = getKeepAliveMasterService(); - try { - return ProtobufUtil.getTableNameArray(master.getTableNames(null, - GetTableNamesRequest.newBuilder().build()) - .getTableNamesList()); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } finally { - master.close(); - } - } - - /** - * @deprecated Use {@link Admin#getTableDescriptorsByTableName(List)} instead - */ - @Deprecated - @Override - public HTableDescriptor[] getHTableDescriptorsByTableName( - List tableNames) throws IOException { - if (tableNames == null || tableNames.isEmpty()) return new HTableDescriptor[0]; - MasterKeepAliveConnection master = getKeepAliveMasterService(); - try { - GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest(tableNames); - return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req)); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } finally { - master.close(); - } - } - - /** - * @deprecated Use {@link Admin#getTableDescriptorsByTableName(List)} instead - */ - @Deprecated - @Override - public HTableDescriptor[] getHTableDescriptors( - List names) throws IOException { - List tableNames = new ArrayList(names.size()); - for(String name : names) { - tableNames.add(TableName.valueOf(name)); - } - - return getHTableDescriptorsByTableName(tableNames); - } - - @Override - public NonceGenerator getNonceGenerator() { - return this.nonceGenerator; - } - - /** - * Connects to the master to get the table descriptor. - * @param tableName table name - * @throws IOException if the connection to master fails or if the table - * is not found. 
- * @deprecated Use {@link Admin#getTableDescriptor(TableName)} instead - */ - @Deprecated - @Override - public HTableDescriptor getHTableDescriptor(final TableName tableName) - throws IOException { - if (tableName == null) return null; - MasterKeepAliveConnection master = getKeepAliveMasterService(); - GetTableDescriptorsResponse htds; - try { - GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest(tableName); - htds = master.getTableDescriptors(null, req); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); - } finally { - master.close(); - } - if (!htds.getTableSchemaList().isEmpty()) { - return HTableDescriptor.convert(htds.getTableSchemaList().get(0)); - } - throw new TableNotFoundException(tableName.getNameAsString()); - } - - /** - * @deprecated Use {@link Admin#getTableDescriptor(TableName)} instead - */ - @Deprecated - @Override - public HTableDescriptor getHTableDescriptor(final byte[] tableName) - throws IOException { - return getHTableDescriptor(TableName.valueOf(tableName)); - } - - @Override - public TableState getTableState(TableName tableName) throws IOException { - ClusterConnection conn = getConnectionInternal(getConfiguration()); - TableState tableState = MetaTableAccessor.getTableState(conn, tableName); - if (tableState == null) - throw new TableNotFoundException(tableName); - return tableState; - } - - @Override - public RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf) { - return RpcRetryingCallerFactory - .instantiate(conf, this.interceptor, this.getStatisticsTracker()); - } - - @Override - public boolean isManaged() { - return managed; - } - } - - /** - * The record of errors for servers. - */ - static class ServerErrorTracker { - // We need a concurrent map here, as we could have multiple threads updating it in parallel. - private final ConcurrentMap errorsByServer = - new ConcurrentHashMap(); - private final long canRetryUntil; - private final int maxRetries; - private final long startTrackingTime; - - public ServerErrorTracker(long timeout, int maxRetries) { - this.maxRetries = maxRetries; - this.canRetryUntil = EnvironmentEdgeManager.currentTime() + timeout; - this.startTrackingTime = new Date().getTime(); - } - - /** - * We stop to retry when we have exhausted BOTH the number of retries and the time allocated. - */ - boolean canRetryMore(int numRetry) { - // If there is a single try we must not take into account the time. - return numRetry < maxRetries || (maxRetries > 1 && - EnvironmentEdgeManager.currentTime() < this.canRetryUntil); - } - - /** - * Calculates the back-off time for a retrying request to a particular server. - * - * @param server The server in question. - * @param basePause The default hci pause. - * @return The time to wait before sending next request. - */ - long calculateBackoffTime(ServerName server, long basePause) { - long result; - ServerErrors errorStats = errorsByServer.get(server); - if (errorStats != null) { - result = ConnectionUtils.getPauseTime(basePause, errorStats.getCount()); - } else { - result = 0; // yes, if the server is not in our list we don't wait before retrying. - } - return result; - } - - /** - * Reports that there was an error on the server to do whatever bean-counting necessary. - * - * @param server The server in question. 
- */ - void reportServerError(ServerName server) { - ServerErrors errors = errorsByServer.get(server); - if (errors != null) { - errors.addError(); - } else { - errors = errorsByServer.putIfAbsent(server, new ServerErrors()); - if (errors != null){ - errors.addError(); - } - } - } - - long getStartTrackingTime() { - return startTrackingTime; - } - - /** - * The record of errors for a server. - */ - private static class ServerErrors { - private final AtomicInteger retries = new AtomicInteger(0); - - public int getCount() { - return retries.get(); - } - - public void addError() { - retries.incrementAndGet(); - } - } - } - - /** - * Look for an exception we know in the remote exception: - * - hadoop.ipc wrapped exceptions - * - nested exceptions - * - * Looks for: RegionMovedException / RegionOpeningException / RegionTooBusyException - * @return null if we didn't find the exception, the exception otherwise. - */ - public static Throwable findException(Object exception) { - if (exception == null || !(exception instanceof Throwable)) { - return null; - } - Throwable cur = (Throwable) exception; - while (cur != null) { - if (cur instanceof RegionMovedException || cur instanceof RegionOpeningException - || cur instanceof RegionTooBusyException) { - return cur; - } - if (cur instanceof RemoteException) { - RemoteException re = (RemoteException) cur; - cur = re.unwrapRemoteException( - RegionOpeningException.class, RegionMovedException.class, - RegionTooBusyException.class); - if (cur == null) { - cur = re.unwrapRemoteException(); - } - // unwrapRemoteException can return the exception given as a parameter when it cannot - // unwrap it. In this case, there is no need to look further - // noinspection ObjectEquality - if (cur == re) { - return null; - } - } else { - cur = cur.getCause(); - } - } - - return null; - } -} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java index dae2499c86d..323915b95fa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java @@ -82,7 +82,7 @@ public final class ConnectionUtils { */ public static NonceGenerator injectNonceGeneratorForTesting( ClusterConnection conn, NonceGenerator cnm) { - return ConnectionManager.injectNonceGeneratorForTesting(conn, cnm); + return ConnectionImplementation.injectNonceGeneratorForTesting(conn, cnm); } /** @@ -94,6 +94,7 @@ public final class ConnectionUtils { */ public static void setServerSideHConnectionRetriesConfig( final Configuration c, final String sn, final Log log) { + // TODO: Fix this. Not all connections from server side should have 10 times the retries. int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); // Go big. Multiply by 10. If we can't get to meta after this many retries @@ -145,10 +146,10 @@ public final class ConnectionUtils { * Some tests shut down the master. But table availability is a master RPC which is performed on * region re-lookups. 
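The ServerErrorTracker being removed from this file captures the client's per-server retry policy: another attempt is allowed until both the retry count and the overall time budget are exhausted, and the pause before re-contacting a server grows with the number of errors already recorded for it (a server with no recorded errors is retried immediately). A JDK-only sketch of that policy, with invented names (SimpleErrorTracker, BACKOFF_MULTIPLIERS) and round numbers rather than HBase's actual constants:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative only: mirrors the shape of the tracker above, not its exact values.
public class SimpleErrorTracker {
  // Later retries wait progressively longer.
  private static final int[] BACKOFF_MULTIPLIERS = {1, 2, 3, 5, 10, 20, 40, 100};

  private final ConcurrentMap<String, AtomicInteger> errorsByServer = new ConcurrentHashMap<>();
  private final long canRetryUntil;  // absolute deadline, in ms
  private final int maxRetries;

  public SimpleErrorTracker(long timeoutMs, int maxRetries) {
    this.canRetryUntil = System.currentTimeMillis() + timeoutMs;
    this.maxRetries = maxRetries;
  }

  // Stop retrying only once BOTH the retry budget and the time budget are spent.
  public boolean canRetryMore(int numRetry) {
    return numRetry < maxRetries
        || (maxRetries > 1 && System.currentTimeMillis() < canRetryUntil);
  }

  // No recorded errors: retry immediately. Otherwise back off with the error count.
  public long calculateBackoffTime(String server, long basePauseMs) {
    AtomicInteger errors = errorsByServer.get(server);
    if (errors == null) {
      return 0L;
    }
    int idx = Math.min(errors.get(), BACKOFF_MULTIPLIERS.length - 1);
    return basePauseMs * BACKOFF_MULTIPLIERS[idx];
  }

  public void reportServerError(String server) {
    errorsByServer.computeIfAbsent(server, s -> new AtomicInteger(0)).incrementAndGet();
  }
}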
*/ - static class MasterlessConnection extends ConnectionManager.HConnectionImplementation { - MasterlessConnection(Configuration conf, boolean managed, + static class MasterlessConnection extends ConnectionImplementation { + MasterlessConnection(Configuration conf, ExecutorService pool, User user) throws IOException { - super(conf, managed, pool, user); + super(conf, pool, user); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java index d947ef8b7c9..86c246278db 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java @@ -253,7 +253,9 @@ public class Delete extends Mutation implements Comparable { * @param family family name * @param timestamp version timestamp * @return this for invocation chaining + * @deprecated Since hbase-1.0.0. Use {@link #addFamilyVersion(byte[], long)} */ + @Deprecated public Delete deleteFamilyVersion(byte [] family, long timestamp) { return addFamilyVersion(family, timestamp); } @@ -264,9 +266,7 @@ public class Delete extends Mutation implements Comparable { * @param family family name * @param timestamp version timestamp * @return this for invocation chaining - * @deprecated Since hbase-1.0.0. Use {@link #addFamilyVersion(byte[], long)} */ - @Deprecated public Delete addFamilyVersion(final byte [] family, final long timestamp) { List list = familyMap.get(family); if(list == null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java new file mode 100644 index 00000000000..b2c4a57c0c9 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java @@ -0,0 +1,102 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.client; + +import java.io.IOException; + +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController; +import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.mortbay.log.Log; + +import com.google.protobuf.ServiceException; + +/** + * A Callable for flushRegion() RPC. + */ +@InterfaceAudience.Private +public class FlushRegionCallable extends RegionAdminServiceCallable { + + private final byte[] regionName; + private final boolean writeFlushWalMarker; + private boolean reload; + + public FlushRegionCallable(ClusterConnection connection, + RpcControllerFactory rpcControllerFactory, TableName tableName, byte[] regionName, + byte[] regionStartKey, boolean writeFlushWalMarker) { + super(connection, rpcControllerFactory, tableName, regionStartKey); + this.regionName = regionName; + this.writeFlushWalMarker = writeFlushWalMarker; + } + + public FlushRegionCallable(ClusterConnection connection, + RpcControllerFactory rpcControllerFactory, HRegionInfo regionInfo, + boolean writeFlushWalMarker) { + this(connection, rpcControllerFactory, regionInfo.getTable(), regionInfo.getRegionName(), + regionInfo.getStartKey(), writeFlushWalMarker); + } + + @Override + public FlushRegionResponse call(int callTimeout) throws Exception { + return flushRegion(); + } + + @Override + public void prepare(boolean reload) throws IOException { + super.prepare(reload); + this.reload = reload; + } + + private FlushRegionResponse flushRegion() throws IOException { + // check whether we should still do the flush to this region. 
If the regions are changed due + // to splits or merges, etc return success + if (!Bytes.equals(location.getRegionInfo().getRegionName(), regionName)) { + if (!reload) { + throw new IOException("Cached location seems to be different than requested region."); + } + Log.info("Skipping flush region, because the located region " + + Bytes.toStringBinary(location.getRegionInfo().getRegionName()) + " is different than " + + " requested region " + Bytes.toStringBinary(regionName)); + return FlushRegionResponse.newBuilder() + .setLastFlushTime(EnvironmentEdgeManager.currentTime()) + .setFlushed(false) + .setWroteFlushWalMarker(false) + .build(); + } + + FlushRegionRequest request = + RequestConverter.buildFlushRegionRequest(regionName, writeFlushWalMarker); + + try { + PayloadCarryingRpcController controller = rpcControllerFactory.newController(); + controller.setPriority(tableName); + return stub.flushRegion(controller, request); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java index 701cd9c1cb7..3fa145c6b08 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java @@ -98,6 +98,7 @@ public class Get extends Query * @param get */ public Get(Get get) { + this(get.getRow()); this.filter = get.getFilter(); this.cacheBlocks = get.getCacheBlocks(); this.maxVersions = get.getMaxVersions(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index e5072459cb8..fe5a5f126e3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -31,6 +31,10 @@ import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.regex.Pattern; import org.apache.commons.logging.Log; @@ -61,9 +65,8 @@ import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; -import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.exceptions.TimeoutIOException; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.ipc.MasterCoprocessorRpcChannel; import org.apache.hadoop.hbase.ipc.RegionServerCoprocessorRpcChannel; @@ -91,18 +94,24 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse; import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; @@ -144,6 +153,7 @@ import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.ForeignExceptionUtil; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; @@ -188,6 +198,7 @@ public class HBaseAdmin implements Admin { // numRetries is for 'normal' stuff... Multiply by this factor when // want to wait a long time. private final int retryLongerMultiplier; + private final int syncWaitTimeout; private boolean aborted; private boolean cleanupConnectionOnClose = false; // close the connection in close() private boolean closed = false; @@ -206,9 +217,7 @@ public class HBaseAdmin implements Admin { @Deprecated public HBaseAdmin(Configuration c) throws MasterNotRunningException, ZooKeeperConnectionException, IOException { - // Will not leak connections, as the new implementation of the constructor - // does not throw exceptions anymore. 
- this(ConnectionManager.getConnectionInternal(new Configuration(c))); + this(ConnectionFactory.createConnection(new Configuration(c))); this.cleanupConnectionOnClose = true; } @@ -246,6 +255,8 @@ public class HBaseAdmin implements Admin { "hbase.client.retries.longer.multiplier", 10); this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + this.syncWaitTimeout = this.conf.getInt( + "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf); } @@ -545,92 +556,23 @@ public class HBaseAdmin implements Admin { */ @Override public void createTable(final HTableDescriptor desc, byte [][] splitKeys) - throws IOException { + throws IOException { + Future future = createTableAsyncV2(desc, splitKeys); try { - createTableAsync(desc, splitKeys); - } catch (SocketTimeoutException ste) { - LOG.warn("Creating " + desc.getTableName() + " took too long", ste); - } - int numRegs = (splitKeys == null ? 1 : splitKeys.length + 1) * desc.getRegionReplication(); - int prevRegCount = 0; - boolean tableWasEnabled = false; - for (int tries = 0; tries < this.numRetries * this.retryLongerMultiplier; - ++tries) { - if (tableWasEnabled) { - // Wait all table regions comes online - final AtomicInteger actualRegCount = new AtomicInteger(0); - MetaScannerVisitor visitor = new MetaScannerVisitorBase() { - @Override - public boolean processRow(Result rowResult) throws IOException { - RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult); - if (list == null) { - LOG.warn("No serialized HRegionInfo in " + rowResult); - return true; - } - HRegionLocation l = list.getRegionLocation(); - if (l == null) { - return true; - } - if (!l.getRegionInfo().getTable().equals(desc.getTableName())) { - return false; - } - if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true; - HRegionLocation[] locations = list.getRegionLocations(); - for (HRegionLocation location : locations) { - if (location == null) continue; - ServerName serverName = location.getServerName(); - // Make sure that regions are assigned to server - if (serverName != null && serverName.getHostAndPort() != null) { - actualRegCount.incrementAndGet(); - } - } - return true; - } - }; - MetaScanner.metaScan(connection, visitor, desc.getTableName()); - if (actualRegCount.get() < numRegs) { - if (tries == this.numRetries * this.retryLongerMultiplier - 1) { - throw new RegionOfflineException("Only " + actualRegCount.get() + - " of " + numRegs + " regions are online; retries exhausted."); - } - try { // Sleep - Thread.sleep(getPauseTime(tries)); - } catch (InterruptedException e) { - throw new InterruptedIOException("Interrupted when opening" + - " regions; " + actualRegCount.get() + " of " + numRegs + - " regions processed so far"); - } - if (actualRegCount.get() > prevRegCount) { // Making progress - prevRegCount = actualRegCount.get(); - tries = -1; - } - } else { - return; - } + // TODO: how long should we wait? spin forever? 
+ future.get(syncWaitTimeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + throw new InterruptedIOException("Interrupted when waiting" + + " for table to be enabled; meta scan was done"); + } catch (TimeoutException e) { + throw new TimeoutIOException(e); + } catch (ExecutionException e) { + if (e.getCause() instanceof IOException) { + throw (IOException)e.getCause(); } else { - try { - tableWasEnabled = isTableAvailable(desc.getTableName()); - } catch (TableNotFoundException tnfe) { - LOG.debug( - "Table " + desc.getTableName() + " was not enabled, sleeping, still " + numRetries - + " retries left"); - } - if (tableWasEnabled) { - // no we will scan meta to ensure all regions are online - tries = -1; - } else { - try { // Sleep - Thread.sleep(getPauseTime(tries)); - } catch (InterruptedException e) { - throw new InterruptedIOException("Interrupted when waiting" + - " for table to be enabled; meta scan was done"); - } - } + throw new IOException(e.getCause()); } } - throw new TableNotEnabledException( - "Retries exhausted while still waiting for table: " - + desc.getTableName() + " to be enabled"); } /** @@ -650,22 +592,42 @@ public class HBaseAdmin implements Admin { * @throws IOException */ @Override - public void createTableAsync( - final HTableDescriptor desc, final byte [][] splitKeys) - throws IOException { - if(desc.getTableName() == null) { + public void createTableAsync(final HTableDescriptor desc, final byte [][] splitKeys) + throws IOException { + createTableAsyncV2(desc, splitKeys); + } + + /** + * Creates a new table but does not block and wait for it to come online. + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. + * It may throw ExecutionException if there was an error while executing the operation + * or TimeoutException in case the wait timeout was not long enough to allow the + * operation to complete. + * + * @param desc table descriptor for table + * @param splitKeys keys to check if the table has been created with all split keys + * @throws IllegalArgumentException Bad table name, if the split keys + * are repeated and if the split key has empty byte array. + * @throws IOException if a remote or network exception occurs + * @return the result of the async creation. You can use Future.get(long, TimeUnit) + * to wait on the operation to complete. 
+ */ + // TODO: This should be called Async but it will break binary compatibility + private Future createTableAsyncV2(final HTableDescriptor desc, final byte[][] splitKeys) + throws IOException { + if (desc.getTableName() == null) { throw new IllegalArgumentException("TableName cannot be null"); } - if(splitKeys != null && splitKeys.length > 0) { + if (splitKeys != null && splitKeys.length > 0) { Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR); // Verify there are no duplicate split keys - byte [] lastKey = null; - for(byte [] splitKey : splitKeys) { + byte[] lastKey = null; + for (byte[] splitKey : splitKeys) { if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) { throw new IllegalArgumentException( "Empty split key must not be passed in the split keys."); } - if(lastKey != null && Bytes.equals(splitKey, lastKey)) { + if (lastKey != null && Bytes.equals(splitKey, lastKey)) { throw new IllegalArgumentException("All split keys must be unique, " + "found duplicate: " + Bytes.toStringBinary(splitKey) + ", " + Bytes.toStringBinary(lastKey)); @@ -674,14 +636,127 @@ public class HBaseAdmin implements Admin { } } - executeCallable(new MasterCallable(getConnection()) { + CreateTableResponse response = executeCallable( + new MasterCallable(getConnection()) { @Override - public Void call(int callTimeout) throws ServiceException { + public CreateTableResponse call(int callTimeout) throws ServiceException { CreateTableRequest request = RequestConverter.buildCreateTableRequest(desc, splitKeys); - master.createTable(null, request); - return null; + return master.createTable(null, request); } }); + return new CreateTableFuture(this, desc, splitKeys, response); + } + + private static class CreateTableFuture extends ProcedureFuture { + private final HTableDescriptor desc; + private final byte[][] splitKeys; + + public CreateTableFuture(final HBaseAdmin admin, final HTableDescriptor desc, + final byte[][] splitKeys, final CreateTableResponse response) { + super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null); + this.splitKeys = splitKeys; + this.desc = desc; + } + + @Override + protected Void waitOperationResult(final long deadlineTs) + throws IOException, TimeoutException { + waitForTableEnabled(deadlineTs); + waitForAllRegionsOnline(deadlineTs); + return null; + } + + @Override + protected Void postOperationResult(final Void result, final long deadlineTs) + throws IOException, TimeoutException { + LOG.info("Created " + desc.getTableName()); + return result; + } + + private void waitForTableEnabled(final long deadlineTs) + throws IOException, TimeoutException { + waitForState(deadlineTs, new WaitForStateCallable() { + @Override + public boolean checkState(int tries) throws IOException { + try { + if (getAdmin().isTableAvailable(desc.getTableName())) { + return true; + } + } catch (TableNotFoundException tnfe) { + LOG.debug("Table "+ desc.getTableName() +" was not enabled, sleeping. 
tries="+ tries); + } + return false; + } + + @Override + public void throwInterruptedException() throws InterruptedIOException { + throw new InterruptedIOException("Interrupted when waiting for table " + + desc.getTableName() + " to be enabled"); + } + + @Override + public void throwTimeoutException(long elapsedTime) throws TimeoutException { + throw new TimeoutException("Table " + desc.getTableName() + + " not enabled after " + elapsedTime + "msec"); + } + }); + } + + private void waitForAllRegionsOnline(final long deadlineTs) + throws IOException, TimeoutException { + final AtomicInteger actualRegCount = new AtomicInteger(0); + final MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { + @Override + public boolean visit(Result rowResult) throws IOException { + RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult); + if (list == null) { + LOG.warn("No serialized HRegionInfo in " + rowResult); + return true; + } + HRegionLocation l = list.getRegionLocation(); + if (l == null) { + return true; + } + if (!l.getRegionInfo().getTable().equals(desc.getTableName())) { + return false; + } + if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true; + HRegionLocation[] locations = list.getRegionLocations(); + for (HRegionLocation location : locations) { + if (location == null) continue; + ServerName serverName = location.getServerName(); + // Make sure that regions are assigned to server + if (serverName != null && serverName.getHostAndPort() != null) { + actualRegCount.incrementAndGet(); + } + } + return true; + } + }; + + int tries = 0; + IOException serverEx = null; + int numRegs = (splitKeys == null ? 1 : splitKeys.length + 1) * desc.getRegionReplication(); + while (EnvironmentEdgeManager.currentTime() < deadlineTs) { + actualRegCount.set(0); + MetaTableAccessor.scanMetaForTableRegions( + getAdmin().getConnection(), visitor, desc.getTableName()); + if (actualRegCount.get() == numRegs) { + // all the regions are online + return; + } + + try { + Thread.sleep(getAdmin().getPauseTime(tries++)); + } catch (InterruptedException e) { + throw new InterruptedIOException("Interrupted when opening" + + " regions; " + actualRegCount.get() + " of " + numRegs + + " regions processed so far"); + } + } + throw new TimeoutException("Only " + actualRegCount.get() + + " of " + numRegs + " regions are online; retries exhausted."); + } } public void deleteTable(final String tableName) throws IOException { @@ -701,48 +776,93 @@ public class HBaseAdmin implements Admin { */ @Override public void deleteTable(final TableName tableName) throws IOException { - boolean tableExists = true; + Future future = deleteTableAsyncV2(tableName); + try { + future.get(syncWaitTimeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + throw new InterruptedIOException("Interrupted when waiting for table to be deleted"); + } catch (TimeoutException e) { + throw new TimeoutIOException(e); + } catch (ExecutionException e) { + if (e.getCause() instanceof IOException) { + throw (IOException)e.getCause(); + } else { + throw new IOException(e.getCause()); + } + } + } - executeCallable(new MasterCallable(getConnection()) { + /** + * Deletes the table but does not block and wait for it be completely removed. + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. 
+ * It may throw ExecutionException if there was an error while executing the operation + * or TimeoutException in case the wait timeout was not long enough to allow the + * operation to complete. + * + * @param desc table descriptor for table + * @param tableName name of table to delete + * @throws IOException if a remote or network exception occurs + * @return the result of the async delete. You can use Future.get(long, TimeUnit) + * to wait on the operation to complete. + */ + // TODO: This should be called Async but it will break binary compatibility + private Future deleteTableAsyncV2(final TableName tableName) throws IOException { + DeleteTableResponse response = executeCallable( + new MasterCallable(getConnection()) { @Override - public Void call(int callTimeout) throws ServiceException { + public DeleteTableResponse call(int callTimeout) throws ServiceException { DeleteTableRequest req = RequestConverter.buildDeleteTableRequest(tableName); - master.deleteTable(null,req); - return null; + return master.deleteTable(null,req); } }); + return new DeleteTableFuture(this, tableName, response); + } - int failures = 0; - for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) { - try { - tableExists = tableExists(tableName); - if (!tableExists) - break; - } catch (IOException ex) { - failures++; - if(failures >= numRetries - 1) { // no more tries left - if (ex instanceof RemoteException) { - throw ((RemoteException) ex).unwrapRemoteException(); - } else { - throw ex; - } + private static class DeleteTableFuture extends ProcedureFuture { + private final TableName tableName; + + public DeleteTableFuture(final HBaseAdmin admin, final TableName tableName, + final DeleteTableResponse response) { + super(admin, (response != null && response.hasProcId()) ? 
response.getProcId() : null); + this.tableName = tableName; + } + + @Override + protected Void waitOperationResult(final long deadlineTs) + throws IOException, TimeoutException { + waitTableNotFound(deadlineTs); + return null; + } + + @Override + protected Void postOperationResult(final Void result, final long deadlineTs) + throws IOException, TimeoutException { + // Delete cached information to prevent clients from using old locations + getAdmin().getConnection().clearRegionCache(tableName); + LOG.info("Deleted " + tableName); + return result; + } + + private void waitTableNotFound(final long deadlineTs) + throws IOException, TimeoutException { + waitForState(deadlineTs, new WaitForStateCallable() { + @Override + public boolean checkState(int tries) throws IOException { + return !getAdmin().tableExists(tableName); } - } - try { - Thread.sleep(getPauseTime(tries)); - } catch (InterruptedException e) { - throw new InterruptedIOException("Interrupted when waiting" + - " for table to be deleted"); - } - } - if (tableExists) { - throw new IOException("Retries exhausted, it took too long to wait"+ - " for the table " + tableName + " to be deleted."); + @Override + public void throwInterruptedException() throws InterruptedIOException { + throw new InterruptedIOException("Interrupted when waiting for table to be deleted"); + } + + @Override + public void throwTimeoutException(long elapsedTime) throws TimeoutException { + throw new TimeoutException("Table " + tableName + " not yet deleted after " + + elapsedTime + "msec"); + } + }); } - // Delete cached information to prevent clients from using old locations - this.connection.clearRegionCache(tableName); - LOG.info("Deleted " + tableName); } /** @@ -826,12 +946,20 @@ public class HBaseAdmin implements Admin { @Override public void enableTable(final TableName tableName) throws IOException { - enableTableAsync(tableName); - - // Wait until all regions are enabled - waitUntilTableIsEnabled(tableName); - - LOG.info("Enabled table " + tableName); + Future future = enableTableAsyncV2(tableName); + try { + future.get(syncWaitTimeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + throw new InterruptedIOException("Interrupted when waiting for table to be disabled"); + } catch (TimeoutException e) { + throw new TimeoutIOException(e); + } catch (ExecutionException e) { + if (e.getCause() instanceof IOException) { + throw (IOException)e.getCause(); + } else { + throw new IOException(e.getCause()); + } + } } public void enableTable(final byte[] tableName) @@ -898,16 +1026,7 @@ public class HBaseAdmin implements Admin { @Override public void enableTableAsync(final TableName tableName) throws IOException { - TableName.isLegalFullyQualifiedTableName(tableName.getName()); - executeCallable(new MasterCallable(getConnection()) { - @Override - public Void call(int callTimeout) throws ServiceException { - LOG.info("Started enable of " + tableName); - EnableTableRequest req = RequestConverter.buildEnableTableRequest(tableName); - master.enableTable(null,req); - return null; - } - }); + enableTableAsyncV2(tableName); } public void enableTableAsync(final byte[] tableName) @@ -920,6 +1039,84 @@ public class HBaseAdmin implements Admin { enableTableAsync(TableName.valueOf(tableName)); } + /** + * Enable the table but does not block and wait for it be completely enabled. + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. 
+ * It may throw ExecutionException if there was an error while executing the operation + * or TimeoutException in case the wait timeout was not long enough to allow the + * operation to complete. + * + * @param tableName name of table to delete + * @throws IOException if a remote or network exception occurs + * @return the result of the async enable. You can use Future.get(long, TimeUnit) + * to wait on the operation to complete. + */ + // TODO: This should be called Async but it will break binary compatibility + private Future enableTableAsyncV2(final TableName tableName) throws IOException { + TableName.isLegalFullyQualifiedTableName(tableName.getName()); + EnableTableResponse response = executeCallable( + new MasterCallable(getConnection()) { + @Override + public EnableTableResponse call(int callTimeout) throws ServiceException { + LOG.info("Started enable of " + tableName); + EnableTableRequest req = RequestConverter.buildEnableTableRequest(tableName); + return master.enableTable(null,req); + } + }); + return new EnableTableFuture(this, tableName, response); + } + + private static class EnableTableFuture extends ProcedureFuture { + private final TableName tableName; + + public EnableTableFuture(final HBaseAdmin admin, final TableName tableName, + final EnableTableResponse response) { + super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null); + this.tableName = tableName; + } + + @Override + protected Void waitOperationResult(final long deadlineTs) + throws IOException, TimeoutException { + waitTableEnabled(deadlineTs); + return null; + } + + @Override + protected Void postOperationResult(final Void result, final long deadlineTs) + throws IOException, TimeoutException { + LOG.info("Enabled " + tableName); + return result; + } + + private void waitTableEnabled(final long deadlineTs) + throws IOException, TimeoutException { + waitForState(deadlineTs, new WaitForStateCallable() { + @Override + public boolean checkState(int tries) throws IOException { + boolean enabled; + try { + enabled = getAdmin().isTableEnabled(tableName); + } catch (TableNotFoundException tnfe) { + return false; + } + return enabled && getAdmin().isTableAvailable(tableName); + } + + @Override + public void throwInterruptedException() throws InterruptedIOException { + throw new InterruptedIOException("Interrupted when waiting for table to be enabled"); + } + + @Override + public void throwTimeoutException(long elapsedTime) throws TimeoutException { + throw new TimeoutException("Table " + tableName + " not yet enabled after " + + elapsedTime + "msec"); + } + }); + } + } + /** * Enable tables matching the passed in pattern and wait on completion. 
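From the caller's side the signatures do not change: enableTable and disableTable still block, but the wait is now bounded by the new "hbase.client.sync.wait.timeout.msec" setting (ten minutes by default in this patch), and a blown deadline surfaces as a TimeoutIOException rather than exhausted retries. A short usage sketch; the table name and the two-minute override are arbitrary:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ToggleTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Bound the synchronous admin calls; the patch's default is 10 minutes.
    conf.setLong("hbase.client.sync.wait.timeout.msec", 2 * 60 * 1000L);

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName name = TableName.valueOf("example_table");
      if (admin.isTableEnabled(name)) {
        admin.disableTable(name); // blocks until disabled or the wait timeout elapses
        admin.enableTable(name);  // likewise for enable
      }
    }
  }
}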
* @@ -978,16 +1175,7 @@ public class HBaseAdmin implements Admin { */ @Override public void disableTableAsync(final TableName tableName) throws IOException { - TableName.isLegalFullyQualifiedTableName(tableName.getName()); - executeCallable(new MasterCallable(getConnection()) { - @Override - public Void call(int callTimeout) throws ServiceException { - LOG.info("Started disable of " + tableName); - DisableTableRequest req = RequestConverter.buildDisableTableRequest(tableName); - master.disableTable(null,req); - return null; - } - }); + disableTableAsyncV2(tableName); } public void disableTableAsync(final byte[] tableName) throws IOException { @@ -1012,32 +1200,20 @@ public class HBaseAdmin implements Admin { @Override public void disableTable(final TableName tableName) throws IOException { - disableTableAsync(tableName); - // Wait until table is disabled - boolean disabled = false; - for (int tries = 0; tries < (this.numRetries * this.retryLongerMultiplier); tries++) { - disabled = isTableDisabled(tableName); - if (disabled) { - break; - } - long sleep = getPauseTime(tries); - if (LOG.isDebugEnabled()) { - LOG.debug("Sleeping= " + sleep + "ms, waiting for all regions to be " + - "disabled in " + tableName); - } - try { - Thread.sleep(sleep); - } catch (InterruptedException e) { - // Do this conversion rather than let it out because do not want to - // change the method signature. - throw (InterruptedIOException)new InterruptedIOException("Interrupted").initCause(e); + Future future = disableTableAsyncV2(tableName); + try { + future.get(syncWaitTimeout, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + throw new InterruptedIOException("Interrupted when waiting for table to be disabled"); + } catch (TimeoutException e) { + throw new TimeoutIOException(e); + } catch (ExecutionException e) { + if (e.getCause() instanceof IOException) { + throw (IOException)e.getCause(); + } else { + throw new IOException(e.getCause()); } } - if (!disabled) { - throw new RegionException("Retries exhausted, it took too long to wait"+ - " for the table " + tableName + " to be disabled."); - } - LOG.info("Disabled " + tableName); } public void disableTable(final byte[] tableName) @@ -1050,6 +1226,78 @@ public class HBaseAdmin implements Admin { disableTable(TableName.valueOf(tableName)); } + /** + * Disable the table but does not block and wait for it be completely disabled. + * You can use Future.get(long, TimeUnit) to wait on the operation to complete. + * It may throw ExecutionException if there was an error while executing the operation + * or TimeoutException in case the wait timeout was not long enough to allow the + * operation to complete. + * + * @param tableName name of table to delete + * @throws IOException if a remote or network exception occurs + * @return the result of the async disable. You can use Future.get(long, TimeUnit) + * to wait on the operation to complete. 
+ */ + // TODO: This should be called Async but it will break binary compatibility + private Future disableTableAsyncV2(final TableName tableName) throws IOException { + TableName.isLegalFullyQualifiedTableName(tableName.getName()); + DisableTableResponse response = executeCallable( + new MasterCallable(getConnection()) { + @Override + public DisableTableResponse call(int callTimeout) throws ServiceException { + LOG.info("Started disable of " + tableName); + DisableTableRequest req = RequestConverter.buildDisableTableRequest(tableName); + return master.disableTable(null, req); + } + }); + return new DisableTableFuture(this, tableName, response); + } + + private static class DisableTableFuture extends ProcedureFuture { + private final TableName tableName; + + public DisableTableFuture(final HBaseAdmin admin, final TableName tableName, + final DisableTableResponse response) { + super(admin, (response != null && response.hasProcId()) ? response.getProcId() : null); + this.tableName = tableName; + } + + @Override + protected Void waitOperationResult(final long deadlineTs) + throws IOException, TimeoutException { + waitTableDisabled(deadlineTs); + return null; + } + + @Override + protected Void postOperationResult(final Void result, final long deadlineTs) + throws IOException, TimeoutException { + LOG.info("Disabled " + tableName); + return result; + } + + private void waitTableDisabled(final long deadlineTs) + throws IOException, TimeoutException { + waitForState(deadlineTs, new WaitForStateCallable() { + @Override + public boolean checkState(int tries) throws IOException { + return getAdmin().isTableDisabled(tableName); + } + + @Override + public void throwInterruptedException() throws InterruptedIOException { + throw new InterruptedIOException("Interrupted when waiting for table to be disabled"); + } + + @Override + public void throwTimeoutException(long elapsedTime) throws TimeoutException { + throw new TimeoutException("Table " + tableName + " not yet disabled after " + + elapsedTime + "msec"); + } + }); + } + } + /** * Disable tables matching the passed in pattern and wait on completion. * @@ -1972,6 +2220,23 @@ public class HBaseAdmin implements Admin { }); } + /** + * Query the state of the balancer from the Master. It's not a guarantee that the balancer is + * actually running this very moment, but that it will run. + * + * @return True if the balancer is enabled, false otherwise. 
+ */ + @Override + public boolean isBalancerEnabled() throws IOException { + return executeCallable(new MasterCallable(getConnection()) { + @Override + public Boolean call(int callTimeout) throws ServiceException { + return master.isBalancerEnabled(null, RequestConverter.buildIsBalancerEnabledRequest()) + .getEnabled(); + } + }); + } + /** * Enable/Disable the catalog janitor * @param enable if true enables the catalog janitor @@ -2243,9 +2508,9 @@ public class HBaseAdmin implements Admin { final AtomicReference> result = new AtomicReference>(null); final String encodedName = Bytes.toString(regionName); - MetaScannerVisitor visitor = new MetaScannerVisitorBase() { + MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { @Override - public boolean processRow(Result data) throws IOException { + public boolean visit(Result data) throws IOException { HRegionInfo info = HRegionInfo.getHRegionInfo(data); if (info == null) { LOG.warn("No serialized HRegionInfo in " + data); @@ -2254,11 +2519,13 @@ public class HBaseAdmin implements Admin { RegionLocations rl = MetaTableAccessor.getRegionLocations(data); boolean matched = false; ServerName sn = null; - for (HRegionLocation h : rl.getRegionLocations()) { - if (h != null && encodedName.equals(h.getRegionInfo().getEncodedName())) { - sn = h.getServerName(); - info = h.getRegionInfo(); - matched = true; + if (rl != null) { + for (HRegionLocation h : rl.getRegionLocations()) { + if (h != null && encodedName.equals(h.getRegionInfo().getEncodedName())) { + sn = h.getServerName(); + info = h.getRegionInfo(); + matched = true; + } } } if (!matched) return true; @@ -2267,7 +2534,7 @@ public class HBaseAdmin implements Admin { } }; - MetaScanner.metaScan(connection, visitor, null); + MetaTableAccessor.fullScanRegions(connection, visitor); pair = result.get(); } return pair; @@ -2561,7 +2828,7 @@ public class HBaseAdmin implements Admin { ZooKeeperKeepAliveConnection zkw = null; try { // This is NASTY. FIX!!!! Dependent on internal implementation! TODO - zkw = ((ConnectionManager.HConnectionImplementation)connection). + zkw = ((ConnectionImplementation)connection). getKeepAliveZooKeeperWatcher(); zkw.getRecoverableZooKeeper().getZooKeeper().exists(zkw.baseZNode, false); } catch (IOException e) { @@ -3781,8 +4048,8 @@ public class HBaseAdmin implements Admin { @Override public int getMasterInfoPort() throws IOException { // TODO: Fix! Reaching into internal implementation!!!! - ConnectionManager.HConnectionImplementation connection = - (ConnectionManager.HConnectionImplementation)this.connection; + ConnectionImplementation connection = + (ConnectionImplementation)this.connection; ZooKeeperKeepAliveConnection zkw = connection.getKeepAliveZooKeeperWatcher(); try { return MasterAddressTracker.getMasterInfoPort(zkw); @@ -3862,6 +4129,9 @@ public class HBaseAdmin implements Admin { compactMob(tableName, null, true); } + /** + * {@inheritDoc} + */ @Override public CompactionState getMobCompactionState(TableName tableName) throws IOException { checkTableNameNotNull(tableName); @@ -3915,7 +4185,239 @@ public class HBaseAdmin implements Admin { HColumnDescriptor family = htd.getFamily(columnFamily); if (family == null || !family.isMobEnabled()) { throw new IllegalArgumentException("Column family " + columnFamily - + " is not a mob column family"); + + " is not a mob column family"); + } + } + + /** + * Future that waits on a procedure result. 
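As its javadoc says, isBalancerEnabled reports only the balancer switch kept by the Master, not whether a balance run is in progress right now. A small usage sketch, assuming an Admin handle obtained from Connection.getAdmin() as in the earlier example, and assuming the existing Admin balancer() call that asks the Master to start a run:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;

public final class BalancerCheck {
  private BalancerCheck() {}

  public static void runBalancerIfEnabled(Admin admin) throws IOException {
    if (admin.isBalancerEnabled()) {
      boolean willRun = admin.balancer(); // asks the Master to start a balance run
      System.out.println("Balancer invoked, will run: " + willRun);
    } else {
      System.out.println("Balancer switch is off");
    }
  }
}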
+ * Returned by the async version of the Admin calls, + * and used internally by the sync calls to wait on the result of the procedure. + */ + @InterfaceAudience.Private + @InterfaceStability.Evolving + protected static class ProcedureFuture implements Future { + private ExecutionException exception = null; + private boolean procResultFound = false; + private boolean done = false; + private V result = null; + + private final HBaseAdmin admin; + private final Long procId; + + public ProcedureFuture(final HBaseAdmin admin, final Long procId) { + this.admin = admin; + this.procId = procId; + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isCancelled() { + // TODO: Abort not implemented yet + return false; + } + + @Override + public V get() throws InterruptedException, ExecutionException { + // TODO: should we ever spin forever? + throw new UnsupportedOperationException(); + } + + @Override + public V get(long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { + if (!done) { + long deadlineTs = EnvironmentEdgeManager.currentTime() + unit.toMillis(timeout); + try { + try { + // if the master support procedures, try to wait the result + if (procId != null) { + result = waitProcedureResult(procId, deadlineTs); + } + // if we don't have a proc result, try the compatibility wait + if (!procResultFound) { + result = waitOperationResult(deadlineTs); + } + result = postOperationResult(result, deadlineTs); + done = true; + } catch (IOException e) { + result = postOpeartionFailure(e, deadlineTs); + done = true; + } + } catch (IOException e) { + exception = new ExecutionException(e); + done = true; + } + } + if (exception != null) { + throw exception; + } + return result; + } + + @Override + public boolean isDone() { + return done; + } + + protected HBaseAdmin getAdmin() { + return admin; + } + + private V waitProcedureResult(long procId, long deadlineTs) + throws IOException, TimeoutException, InterruptedException { + GetProcedureResultRequest request = GetProcedureResultRequest.newBuilder() + .setProcId(procId) + .build(); + + int tries = 0; + IOException serviceEx = null; + while (EnvironmentEdgeManager.currentTime() < deadlineTs) { + GetProcedureResultResponse response = null; + try { + // Try to fetch the result + response = getProcedureResult(request); + } catch (IOException e) { + serviceEx = unwrapException(e); + + // the master may be down + LOG.warn("failed to get the procedure result procId=" + procId, serviceEx); + + // Not much to do, if we have a DoNotRetryIOException + if (serviceEx instanceof DoNotRetryIOException) { + // TODO: looks like there is no way to unwrap this exception and get the proper + // UnsupportedOperationException aside from looking at the message. + // anyway, if we fail here we just failover to the compatibility side + // and that is always a valid solution. 
+ LOG.warn("Proc-v2 is unsupported on this master: " + serviceEx.getMessage(), serviceEx); + procResultFound = false; + return null; + } + } + + // If the procedure is no longer running, we should have a result + if (response != null && response.getState() != GetProcedureResultResponse.State.RUNNING) { + procResultFound = response.getState() != GetProcedureResultResponse.State.NOT_FOUND; + return convertResult(response); + } + + try { + Thread.sleep(getAdmin().getPauseTime(tries++)); + } catch (InterruptedException e) { + throw new InterruptedException( + "Interrupted while waiting for the result of proc " + procId); + } + } + if (serviceEx != null) { + throw serviceEx; + } else { + throw new TimeoutException("The procedure " + procId + " is still running"); + } + } + + private static IOException unwrapException(IOException e) { + if (e instanceof RemoteException) { + return ((RemoteException)e).unwrapRemoteException(); + } + return e; + } + + protected GetProcedureResultResponse getProcedureResult(final GetProcedureResultRequest request) + throws IOException { + return admin.executeCallable(new MasterCallable( + admin.getConnection()) { + @Override + public GetProcedureResultResponse call(int callTimeout) throws ServiceException { + return master.getProcedureResult(null, request); + } + }); + } + + /** + * Convert the procedure result response to a specified type. + * @param response the procedure result object to parse + * @return the result data of the procedure. + */ + protected V convertResult(final GetProcedureResultResponse response) throws IOException { + if (response.hasException()) { + throw ForeignExceptionUtil.toIOException(response.getException()); + } + return null; + } + + /** + * Fallback implementation in case the procedure is not supported by the server. + * It should try to wait until the operation is completed. + * @param deadlineTs the timestamp after which this method should throw a TimeoutException + * @return the result data of the operation + */ + protected V waitOperationResult(final long deadlineTs) + throws IOException, TimeoutException { + return null; + } + + /** + * Called after the operation is completed and the result fetched. + * this allows to perform extra steps after the procedure is completed. + * it allows to apply transformations to the result that will be returned by get(). + * @param result the result of the procedure + * @param deadlineTs the timestamp after which this method should throw a TimeoutException + * @return the result of the procedure, which may be the same as the passed one + */ + protected V postOperationResult(final V result, final long deadlineTs) + throws IOException, TimeoutException { + return result; + } + + /** + * Called after the operation is terminated with a failure. + * this allows to perform extra steps after the procedure is terminated. + * it allows to apply transformations to the result that will be returned by get(). 
+ * The default implementation will rethrow the exception + * @param exception the exception got from fetching the result + * @param deadlineTs the timestamp after which this method should throw a TimeoutException + * @return the result of the procedure, which may be the same as the passed one + */ + protected V postOpeartionFailure(final IOException exception, final long deadlineTs) + throws IOException, TimeoutException { + throw exception; + } + + protected interface WaitForStateCallable { + boolean checkState(int tries) throws IOException; + void throwInterruptedException() throws InterruptedIOException; + void throwTimeoutException(long elapsed) throws TimeoutException; + } + + protected void waitForState(final long deadlineTs, final WaitForStateCallable callable) + throws IOException, TimeoutException { + int tries = 0; + IOException serverEx = null; + long startTime = EnvironmentEdgeManager.currentTime(); + while (EnvironmentEdgeManager.currentTime() < deadlineTs) { + serverEx = null; + try { + if (callable.checkState(tries)) { + return; + } + } catch (IOException e) { + serverEx = e; + } + try { + Thread.sleep(getAdmin().getPauseTime(tries++)); + } catch (InterruptedException e) { + callable.throwInterruptedException(); + } + } + if (serverEx != null) { + throw unwrapException(serverEx); + } else { + callable.throwTimeoutException(EnvironmentEdgeManager.currentTime() - startTime); + } } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java index f185cb20188..cc5e9faaaa0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java @@ -38,22 +38,16 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; /** * A cluster connection. Knows how to find the master, locate regions out on the cluster, * keeps a cache of locations and then knows how to re-calibrate after they move. You need one - * of these to talk to your HBase cluster. {@link HConnectionManager} manages instances of this + * of these to talk to your HBase cluster. {@link ConnectionFactory} manages instances of this * class. See it for how to get one of these. * *
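waitProcedureResult and the waitForState helper defined above are both instances of the same loop: re-check a condition until it holds or an absolute deadline passes, sleeping a little longer between attempts and remembering the last server-side error so it can be rethrown instead of a bare timeout. A stripped-down, JDK-only version of that loop (DeadlinePoller and Check are invented names; HBase's version additionally routes interrupts and timeouts through the WaitForStateCallable callbacks shown above):

import java.io.IOException;
import java.util.concurrent.TimeoutException;

public final class DeadlinePoller {
  private DeadlinePoller() {}

  // Condition re-evaluated on every attempt; 'tries' counts the attempts made so far.
  public interface Check {
    boolean done(int tries) throws IOException;
  }

  public static void waitFor(Check check, long deadlineTs, long basePauseMs)
      throws IOException, TimeoutException, InterruptedException {
    int tries = 0;
    IOException lastError = null;
    while (System.currentTimeMillis() < deadlineTs) {
      try {
        if (check.done(tries)) {
          return;
        }
        lastError = null;
      } catch (IOException e) {
        lastError = e; // remember and keep trying until the deadline
      }
      // Grow the pause with the attempt count, in the spirit of getPauseTime(tries) above.
      Thread.sleep(Math.min(basePauseMs * (tries + 1), 10_000L));
      tries++;
    }
    if (lastError != null) {
      throw lastError;
    }
    throw new TimeoutException("Condition not met before the deadline");
  }
}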
 * This is NOT a connection to a particular server but to ALL servers in the cluster. Individual
 * connections are managed at a lower level.
 *
HConnections are used by {@link HTable} mostly but also by - * {@link HBaseAdmin}, and {@link org.apache.hadoop.hbase.zookeeper.MetaTableLocator}. - * HConnection instances can be shared. Sharing - * is usually what you want because rather than each HConnection instance - * having to do its own discovery of regions out on the cluster, instead, all - * clients get to share the one cache of locations. {@link HConnectionManager} does the - * sharing for you if you go by it getting connections. Sharing makes cleanup of - * HConnections awkward. See {@link HConnectionManager} for cleanup discussion. + * {@link HBaseAdmin}, and {@link org.apache.hadoop.hbase.zookeeper.MetaTableLocator}. * - * @see HConnectionManager + * @see ConnectionFactory * @deprecated in favor of {@link Connection} and {@link ConnectionFactory} */ @InterfaceAudience.Public @@ -78,8 +72,7 @@ public interface HConnection extends Connection { * be created for each using thread. * This is a lightweight operation, pooling or caching of the returned HTableInterface * is neither required nor desired. - * Note that the HConnection needs to be unmanaged - * (created with {@link HConnectionManager#createConnection(Configuration)}). + * (created with {@link ConnectionFactory#createConnection(Configuration)}). * @param tableName * @return an HTable to use for interactions with this table */ @@ -91,8 +84,7 @@ public interface HConnection extends Connection { * be created for each using thread. * This is a lightweight operation, pooling or caching of the returned HTableInterface * is neither required nor desired. - * Note that the HConnection needs to be unmanaged - * (created with {@link HConnectionManager#createConnection(Configuration)}). + * (created with {@link ConnectionFactory#createConnection(Configuration)}). * @param tableName * @return an HTable to use for interactions with this table */ @@ -104,8 +96,7 @@ public interface HConnection extends Connection { * be created for each using thread. * This is a lightweight operation, pooling or caching of the returned HTableInterface * is neither required nor desired. - * Note that the HConnection needs to be unmanaged - * (created with {@link HConnectionManager#createConnection(Configuration)}). + * (created with {@link ConnectionFactory#createConnection(Configuration)}). * @param tableName * @return an HTable to use for interactions with this table */ @@ -118,8 +109,7 @@ public interface HConnection extends Connection { * be created for each using thread. * This is a lightweight operation, pooling or caching of the returned HTableInterface * is neither required nor desired. - * Note that the HConnection needs to be unmanaged - * (created with {@link HConnectionManager#createConnection(Configuration)}). + * (created with {@link ConnectionFactory#createConnection(Configuration)}). * @param tableName * @param pool The thread pool to use for batch operations, null to use a default pool. * @return an HTable to use for interactions with this table @@ -132,8 +122,7 @@ public interface HConnection extends Connection { * be created for each using thread. * This is a lightweight operation, pooling or caching of the returned HTableInterface * is neither required nor desired. - * Note that the HConnection needs to be unmanaged - * (created with {@link HConnectionManager#createConnection(Configuration)}). + * (created with {@link ConnectionFactory#createConnection(Configuration)}). * @param tableName * @param pool The thread pool to use for batch operations, null to use a default pool. 
* @return an HTable to use for interactions with this table @@ -146,9 +135,8 @@ public interface HConnection extends Connection { * be created for each using thread. * This is a lightweight operation, pooling or caching of the returned HTableInterface * is neither required nor desired. - * Note that the HConnection needs to be unmanaged - * (created with {@link HConnectionManager#createConnection(Configuration)}). - * @param tableName + * (created with {@link ConnectionFactory#createConnection(Configuration)}). + * @param tableName table to get interface for * @param pool The thread pool to use for batch operations, null to use a default pool. * @return an HTable to use for interactions with this table */ @@ -161,10 +149,6 @@ public interface HConnection extends Connection { * * This is a lightweight operation. Pooling or caching of the returned RegionLocator is neither * required nor desired. - * - * RegionLocator needs to be unmanaged - * (created with {@link HConnectionManager#createConnection(Configuration)}). - * * @param tableName Name of the table who's region is to be examined * @return A RegionLocator instance */ @@ -175,7 +159,7 @@ public interface HConnection extends Connection { * Retrieve an Admin implementation to administer an HBase cluster. * The returned Admin is not guaranteed to be thread-safe. A new instance should be created for * each using thread. This is a lightweight operation. Pooling or caching of the returned - * Admin is not recommended. Note that HConnection needs to be unmanaged + * Admin is not recommended. * * @return an Admin instance for cluster administration */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionKey.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionKey.java deleted file mode 100644 index f37690ca709..00000000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionKey.java +++ /dev/null @@ -1,146 +0,0 @@ -/** - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.ipc.RpcControllerFactory; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.security.UserProvider; - -/** - * Denotes a unique key to an {@link HConnection} instance. - * - * In essence, this class captures the properties in {@link Configuration} - * that may be used in the process of establishing a connection. 
In light of - * that, if any new such properties are introduced into the mix, they must be - * added to the {@link HConnectionKey#properties} list. - * - */ -class HConnectionKey { - final static String[] CONNECTION_PROPERTIES = new String[] { - HConstants.ZOOKEEPER_QUORUM, HConstants.ZOOKEEPER_ZNODE_PARENT, - HConstants.ZOOKEEPER_CLIENT_PORT, - HConstants.ZOOKEEPER_RECOVERABLE_WAITTIME, - HConstants.HBASE_CLIENT_PAUSE, HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.HBASE_META_SCANNER_CACHING, - HConstants.HBASE_CLIENT_INSTANCE_ID, - HConstants.RPC_CODEC_CONF_KEY, - HConstants.USE_META_REPLICAS, - RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY}; - - private Map properties; - private String username; - - HConnectionKey(Configuration conf) { - Map m = new HashMap(); - if (conf != null) { - for (String property : CONNECTION_PROPERTIES) { - String value = conf.get(property); - if (value != null) { - m.put(property, value); - } - } - } - this.properties = Collections.unmodifiableMap(m); - - try { - UserProvider provider = UserProvider.instantiate(conf); - User currentUser = provider.getCurrent(); - if (currentUser != null) { - username = currentUser.getName(); - } - } catch (IOException ioe) { - ConnectionManager.LOG.warn( - "Error obtaining current user, skipping username in HConnectionKey", ioe); - } - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - if (username != null) { - result = username.hashCode(); - } - for (String property : CONNECTION_PROPERTIES) { - String value = properties.get(property); - if (value != null) { - result = prime * result + value.hashCode(); - } - } - - return result; - } - - - @edu.umd.cs.findbugs.annotations.SuppressWarnings (value="ES_COMPARING_STRINGS_WITH_EQ", - justification="Optimization") - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - HConnectionKey that = (HConnectionKey) obj; - if (this.username != null && !this.username.equals(that.username)) { - return false; - } else if (this.username == null && that.username != null) { - return false; - } - if (this.properties == null) { - if (that.properties != null) { - return false; - } - } else { - if (that.properties == null) { - return false; - } - for (String property : CONNECTION_PROPERTIES) { - String thisValue = this.properties.get(property); - String thatValue = that.properties.get(property); - //noinspection StringEquality - if (thisValue == thatValue) { - continue; - } - if (thisValue == null || !thisValue.equals(thatValue)) { - return false; - } - } - } - return true; - } - - @Override - public String toString() { - return "HConnectionKey{" + - "properties=" + properties + - ", username='" + username + '\'' + - '}'; - } -} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java deleted file mode 100644 index edd071b7834..00000000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ /dev/null @@ -1,324 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import java.io.IOException; -import java.util.concurrent.ExecutorService; - -import org.apache.commons.logging.Log; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.security.User; - -/** - * A non-instantiable class that manages creation of {@link HConnection}s. - *

The simplest way to use this class is by using {@link #createConnection(Configuration)}. - * This creates a new {@link HConnection} to the cluster that is managed by the caller. - * From this {@link HConnection} {@link HTableInterface} implementations are retrieved - * with {@link HConnection#getTable(byte[])}. Example: - *

- * HConnection connection = HConnectionManager.createConnection(config);
- * HTableInterface table = connection.getTable(TableName.valueOf("table1"));
- * try {
- *   // Use the table as needed, for a single operation and a single thread
- * } finally {
- *   table.close();
- *   connection.close();
- * }
- *
- *
This class has a static Map of {@link HConnection} instances keyed by - * {@link HConnectionKey}; A {@link HConnectionKey} is identified by a set of - * {@link Configuration} properties. Invocations of {@link #getConnection(Configuration)} - * that pass the same {@link Configuration} instance will return the same - * {@link HConnection} instance ONLY WHEN the set of properties are the same - * (i.e. if you change properties in your {@link Configuration} instance, such as RPC timeout, - * the codec used, HBase will create a new {@link HConnection} instance. For more details on - * how this is done see {@link HConnectionKey}). - *

Sharing {@link HConnection} instances is usually what you want; all clients - * of the {@link HConnection} instances share the HConnections' cache of Region - * locations rather than each having to discover for itself the location of meta, etc. - * But sharing connections makes clean up of {@link HConnection} instances a little awkward. - * Currently, clients cleanup by calling {@link #deleteConnection(Configuration)}. This will - * shutdown the zookeeper connection the HConnection was using and clean up all - * HConnection resources as well as stopping proxies to servers out on the - * cluster. Not running the cleanup will not end the world; it'll - * just stall the closeup some and spew some zookeeper connection failed - * messages into the log. Running the cleanup on a {@link HConnection} that is - * subsequently used by another will cause breakage so be careful running - * cleanup. - *

To create a {@link HConnection} that is not shared by others, you can - * set property "hbase.client.instance.id" to a unique value for your {@link Configuration} - * instance, like the following: - *

- * {@code
- * conf.set("hbase.client.instance.id", "12345");
- * HConnection connection = HConnectionManager.getConnection(conf);
- * // Use the connection to your hearts' delight and then when done...
- * conf.set("hbase.client.instance.id", "12345");
- * HConnectionManager.deleteConnection(conf, true);
- * }
- *
- *
Cleanup used to be done inside in a shutdown hook. On startup we'd - * register a shutdown hook that called {@link #deleteAllConnections()} - * on its way out but the order in which shutdown hooks run is not defined so - * were problematic for clients of HConnection that wanted to register their - * own shutdown hooks so we removed ours though this shifts the onus for - * cleanup to the client. - * @deprecated Please use ConnectionFactory instead - */ -@InterfaceAudience.Public -@InterfaceStability.Evolving -@Deprecated -public final class HConnectionManager extends ConnectionFactory { - - /** @deprecated connection caching is going away */ - @Deprecated - public static final String RETRIES_BY_SERVER_KEY = - ConnectionManager.RETRIES_BY_SERVER_KEY; - - /** @deprecated connection caching is going away */ - @Deprecated - public static final int MAX_CACHED_CONNECTION_INSTANCES = - ConnectionManager.MAX_CACHED_CONNECTION_INSTANCES; - - /* - * Non-instantiable. - */ - private HConnectionManager() { - super(); - } - - /** - * Get the connection that goes with the passed conf configuration instance. - * If no current connection exists, method creates a new connection and keys it using - * connection-specific properties from the passed {@link Configuration}; see - * {@link HConnectionKey}. - * @param conf configuration - * @return HConnection object for conf - * @deprecated connection caching is going away - */ - @Deprecated - public static HConnection getConnection(final Configuration conf) throws IOException { - return ConnectionManager.getConnectionInternal(conf); - } - - /** - * Create a new HConnection instance using the passed conf instance. - *

Note: This bypasses the usual HConnection life cycle management done by - * {@link #getConnection(Configuration)}. The caller is responsible for - * calling {@link HConnection#close()} on the returned connection instance. - * - * This is the recommended way to create HConnections. - *

-   * HConnection connection = HConnectionManager.createConnection(conf);
-   * HTableInterface table = connection.getTable("mytable");
-   * try {
-   *   table.get(...);
-   *   ...
-   * } finally {
-   *   table.close();
-   *   connection.close();
-   * }
-   *
- * - * @param conf configuration - * @return HConnection object for conf - * @deprecated in favor of {@link Connection} and {@link ConnectionFactory} - */ - @Deprecated - public static HConnection createConnection(Configuration conf) throws IOException { - return ConnectionManager.createConnectionInternal(conf); - } - - - /** - * Create a new HConnection instance using the passed conf instance. - *

Note: This bypasses the usual HConnection life cycle management done by - * {@link #getConnection(Configuration)}. The caller is responsible for - * calling {@link HConnection#close()} on the returned connection instance. - * This is the recommended way to create HConnections. - *

-   * ExecutorService pool = ...;
-   * HConnection connection = HConnectionManager.createConnection(conf, pool);
-   * HTableInterface table = connection.getTable("mytable");
-   * table.get(...);
-   * ...
-   * table.close();
-   * connection.close();
-   *
- * @param conf configuration - * @param pool the thread pool to use for batch operation in HTables used via this HConnection - * @return HConnection object for conf - * @deprecated in favor of {@link Connection} and {@link ConnectionFactory} - */ - @Deprecated - public static HConnection createConnection(Configuration conf, ExecutorService pool) - throws IOException { - return ConnectionManager.createConnection(conf, pool); - } - - /** - * Create a new HConnection instance using the passed conf instance. - *

Note: This bypasses the usual HConnection life cycle management done by - * {@link #getConnection(Configuration)}. The caller is responsible for - * calling {@link HConnection#close()} on the returned connection instance. - * This is the recommended way to create HConnections. - *

-   * ExecutorService pool = ...;
-   * HConnection connection = HConnectionManager.createConnection(conf, pool);
-   * HTableInterface table = connection.getTable("mytable");
-   * table.get(...);
-   * ...
-   * table.close();
-   * connection.close();
-   *
- * @param conf configuration - * @param user the user the connection is for - * @return HConnection object for conf - * @deprecated in favor of {@link Connection} and {@link ConnectionFactory} - */ - @Deprecated - public static HConnection createConnection(Configuration conf, User user) - throws IOException { - return ConnectionManager.createConnection(conf, user); - } - - /** - * Create a new HConnection instance using the passed conf instance. - *

Note: This bypasses the usual HConnection life cycle management done by - * {@link #getConnection(Configuration)}. The caller is responsible for - * calling {@link HConnection#close()} on the returned connection instance. - * This is the recommended way to create HConnections. - *

-   * ExecutorService pool = ...;
-   * HConnection connection = HConnectionManager.createConnection(conf, pool);
-   * HTableInterface table = connection.getTable("mytable");
-   * table.get(...);
-   * ...
-   * table.close();
-   * connection.close();
-   *
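A sketch of the corresponding ConnectionFactory overload that accepts a caller-supplied thread pool and User, assuming the ConnectionFactory.createConnection(Configuration, ExecutorService, User) signature; the pool sizing is illustrative:

    import java.io.IOException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.User;
    import org.apache.hadoop.hbase.security.UserProvider;

    public class PooledConnectionUsage {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        ExecutorService pool = Executors.newFixedThreadPool(8); // sizing is illustrative
        User user = UserProvider.instantiate(conf).getCurrent();
        try (Connection connection = ConnectionFactory.createConnection(conf, pool, user)) {
          // Tables obtained from this connection run their batch operations on the supplied pool.
        } finally {
          pool.shutdown(); // the caller still owns the pool it passed in
        }
      }
    }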
- * @param conf configuration - * @param pool the thread pool to use for batch operation in HTables used via this HConnection - * @param user the user the connection is for - * @return HConnection object for conf - * @deprecated in favor of {@link Connection} and {@link ConnectionFactory} - */ - @Deprecated - public static HConnection createConnection(Configuration conf, ExecutorService pool, User user) - throws IOException { - return ConnectionManager.createConnection(conf, pool, user); - } - - /** - * @deprecated in favor of {@link Connection} and {@link ConnectionFactory} - */ - @Deprecated - static HConnection createConnection(final Configuration conf, final boolean managed) - throws IOException { - return ConnectionManager.createConnection(conf, managed); - } - - /** - * @deprecated in favor of {@link Connection} and {@link ConnectionFactory} - */ - @Deprecated - static ClusterConnection createConnection(final Configuration conf, final boolean managed, - final ExecutorService pool, final User user) throws IOException { - return ConnectionManager.createConnection(conf, managed, pool, user); - } - - /** - * Delete connection information for the instance specified by passed configuration. - * If there are no more references to the designated connection connection, this method will - * then close connection to the zookeeper ensemble and let go of all associated resources. - * - * @param conf configuration whose identity is used to find {@link HConnection} instance. - * @deprecated connection caching is going away. - */ - @Deprecated - public static void deleteConnection(Configuration conf) { - ConnectionManager.deleteConnection(conf); - } - - /** - * Cleanup a known stale connection. - * This will then close connection to the zookeeper ensemble and let go of all resources. - * - * @param connection - * @deprecated connection caching is going away. - */ - @Deprecated - public static void deleteStaleConnection(HConnection connection) { - ConnectionManager.deleteStaleConnection(connection); - } - - /** - * Delete information for all connections. Close or not the connection, depending on the - * staleConnection boolean and the ref count. By default, you should use it with - * staleConnection to true. - * @deprecated connection caching is going away. - */ - @Deprecated - public static void deleteAllConnections(boolean staleConnection) { - ConnectionManager.deleteAllConnections(staleConnection); - } - - /** - * Delete information for all connections.. - * @deprecated kept for backward compatibility, but the behavior is broken. HBASE-8983 - */ - @Deprecated - public static void deleteAllConnections() { - ConnectionManager.deleteAllConnections(); - } - - /** - * This convenience method invokes the given {@link HConnectable#connect} - * implementation using a {@link HConnection} instance that lasts just for the - * duration of the invocation. - * - * @param the return type of the connect method - * @param connectable the {@link HConnectable} instance - * @return the value returned by the connect method - * @throws IOException - * @deprecated Internal method, do not use thru HConnectionManager. - */ - @InterfaceAudience.Private - @Deprecated - public static T execute(HConnectable connectable) throws IOException { - return ConnectionManager.execute(connectable); - } - - /** - * Set the number of retries to use serverside when trying to communicate - * with another server over {@link HConnection}. Used updating catalog - * tables, etc. Call this method before we create any Connections. 
- * @param c The Configuration instance to set the retries into. - * @param log Used to log what we set in here. - * @deprecated Internal method, do not use. - */ - @InterfaceAudience.Private - @Deprecated - public static void setServerSideHConnectionRetries( - final Configuration c, final String sn, final Log log) { - ConnectionUtils.setServerSideHConnectionRetriesConfig(c, sn, log); - } -} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java index fa856538f68..782ab6622a3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java @@ -21,12 +21,12 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.NavigableMap; -import java.util.Map.Entry; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; @@ -34,8 +34,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.util.Pair; -import com.google.common.annotations.VisibleForTesting; - /** * An implementation of {@link RegionLocator}. Used to view region location information for a single * HBase table. Lightweight. Get as needed and just close when done. Instances of this class SHOULD @@ -85,11 +83,11 @@ public class HRegionLocator implements RegionLocator { @Override public List getAllRegionLocations() throws IOException { - NavigableMap locations = - MetaScanner.allTableRegions(this.connection, getName()); + List> locations = + MetaTableAccessor.getTableRegionsAndLocations(this.connection, getName()); ArrayList regions = new ArrayList<>(locations.size()); - for (Entry entry : locations.entrySet()) { - regions.add(new HRegionLocation(entry.getKey(), entry.getValue())); + for (Pair entry : locations) { + regions.add(new HRegionLocation(entry.getFirst(), entry.getSecond())); } return regions; } @@ -139,7 +137,18 @@ public class HRegionLocator implements RegionLocator { @VisibleForTesting List listRegionLocations() throws IOException { - return MetaScanner.listTableRegionLocations(getConfiguration(), this.connection, getName()); + final List regions = new ArrayList(); + MetaTableAccessor.Visitor visitor = new MetaTableAccessor.TableVisitorBase(tableName) { + @Override + public boolean visitInternal(Result result) throws IOException { + RegionLocations locations = MetaTableAccessor.getRegionLocations(result); + if (locations == null) return true; + regions.add(locations); + return true; + } + }; + MetaTableAccessor.scanMetaForTableRegions(connection, visitor, tableName); + return regions; } public Configuration getConfiguration() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index d15ab27ce6a..434e32fd258 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -44,6 +44,7 @@ import 
org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; @@ -127,78 +128,6 @@ public class HTable implements HTableInterface { private RpcRetryingCallerFactory rpcCallerFactory; private RpcControllerFactory rpcControllerFactory; - /** - * Creates an object to access a HBase table. - * @param conf Configuration object to use. - * @param tableName Name of the table. - * @throws IOException if a remote or network exception occurs - * @deprecated Constructing HTable objects manually has been deprecated. Please use - * {@link Connection} to instantiate a {@link Table} instead. - */ - @Deprecated - public HTable(Configuration conf, final String tableName) - throws IOException { - this(conf, TableName.valueOf(tableName)); - } - - /** - * Creates an object to access a HBase table. - * @param conf Configuration object to use. - * @param tableName Name of the table. - * @throws IOException if a remote or network exception occurs - * @deprecated Constructing HTable objects manually has been deprecated. Please use - * {@link Connection} to instantiate a {@link Table} instead. - */ - @Deprecated - public HTable(Configuration conf, final byte[] tableName) - throws IOException { - this(conf, TableName.valueOf(tableName)); - } - - /** - * Creates an object to access a HBase table. - * @param conf Configuration object to use. - * @param tableName table name pojo - * @throws IOException if a remote or network exception occurs - * @deprecated Constructing HTable objects manually has been deprecated. Please use - * {@link Connection} to instantiate a {@link Table} instead. - */ - @Deprecated - public HTable(Configuration conf, final TableName tableName) - throws IOException { - this.tableName = tableName; - this.cleanupPoolOnClose = true; - this.cleanupConnectionOnClose = true; - if (conf == null) { - this.connection = null; - return; - } - this.connection = (ClusterConnection) ConnectionFactory.createConnection(conf); - this.configuration = conf; - - this.pool = getDefaultExecutor(conf); - this.finishSetup(); - } - - /** - * Creates an object to access a HBase table. - * @param tableName Name of the table. - * @param connection HConnection to be used. - * @throws IOException if a remote or network exception occurs - * @deprecated Do not use. - */ - @Deprecated - public HTable(TableName tableName, Connection connection) throws IOException { - this.tableName = tableName; - this.cleanupPoolOnClose = true; - this.cleanupConnectionOnClose = false; - this.connection = (ClusterConnection)connection; - this.configuration = connection.getConfiguration(); - - this.pool = getDefaultExecutor(this.configuration); - this.finishSetup(); - } - // Marked Private @since 1.0 @InterfaceAudience.Private public static ThreadPoolExecutor getDefaultExecutor(Configuration conf) { @@ -218,68 +147,6 @@ public class HTable implements HTableInterface { return pool; } - /** - * Creates an object to access a HBase table. - * @param conf Configuration object to use. - * @param tableName Name of the table. - * @param pool ExecutorService to be used. - * @throws IOException if a remote or network exception occurs - * @deprecated Constructing HTable objects manually has been deprecated. 
Please use - * {@link Connection} to instantiate a {@link Table} instead. - */ - @Deprecated - public HTable(Configuration conf, final byte[] tableName, final ExecutorService pool) - throws IOException { - this(conf, TableName.valueOf(tableName), pool); - } - - /** - * Creates an object to access a HBase table. - * @param conf Configuration object to use. - * @param tableName Name of the table. - * @param pool ExecutorService to be used. - * @throws IOException if a remote or network exception occurs - * @deprecated Constructing HTable objects manually has been deprecated. Please use - * {@link Connection} to instantiate a {@link Table} instead. - */ - @Deprecated - public HTable(Configuration conf, final TableName tableName, final ExecutorService pool) - throws IOException { - this.connection = (ClusterConnection) ConnectionFactory.createConnection(conf); - this.configuration = conf; - this.pool = pool; - if (pool == null) { - this.pool = getDefaultExecutor(conf); - this.cleanupPoolOnClose = true; - } else { - this.cleanupPoolOnClose = false; - } - this.tableName = tableName; - this.cleanupConnectionOnClose = true; - this.finishSetup(); - } - - /** - * Creates an object to access a HBase table. - * @param tableName Name of the table. - * @param connection HConnection to be used. - * @param pool ExecutorService to be used. - * @throws IOException if a remote or network exception occurs. - * @deprecated Do not use, internal ctor. - */ - @Deprecated - public HTable(final byte[] tableName, final Connection connection, - final ExecutorService pool) throws IOException { - this(TableName.valueOf(tableName), connection, pool); - } - - /** @deprecated Do not use, internal ctor. */ - @Deprecated - public HTable(TableName tableName, final Connection connection, - final ExecutorService pool) throws IOException { - this(tableName, (ClusterConnection)connection, null, null, null, pool); - } - /** * Creates an object to access a HBase table. * Used by HBase internally. DO NOT USE. See {@link ConnectionFactory} class comment for how to @@ -290,7 +157,7 @@ public class HTable implements HTableInterface { * @throws IOException if a remote or network exception occurs */ @InterfaceAudience.Private - public HTable(TableName tableName, final ClusterConnection connection, + protected HTable(TableName tableName, final ClusterConnection connection, final TableConfiguration tableConfig, final RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory, @@ -452,12 +319,9 @@ public class HTable implements HTableInterface { @Deprecated public static boolean isTableEnabled(Configuration conf, final TableName tableName) throws IOException { - return HConnectionManager.execute(new HConnectable(conf) { - @Override - public Boolean connect(HConnection connection) throws IOException { - return connection.isTableEnabled(tableName); - } - }); + try(Connection conn = ConnectionFactory.createConnection(conf)) { + return conn.getAdmin().isTableEnabled(tableName); + } } /** @@ -517,17 +381,6 @@ public class HTable implements HTableInterface { return this.connection; } - /** - * Gets the number of rows that a scanner will fetch at once. - *

- * The default value comes from {@code hbase.client.scanner.caching}. - * @deprecated Use {@link Scan#setCaching(int)} and {@link Scan#getCaching()} - */ - @Deprecated - public int getScannerCaching() { - return scannerCaching; - } - /** * Kept in 0.96 for backward compatibility * @deprecated since 0.96. This is an internal buffer that should not be read nor write. @@ -537,22 +390,6 @@ public class HTable implements HTableInterface { return mutator == null ? null : mutator.getWriteBuffer(); } - /** - * Sets the number of rows that a scanner will fetch at once. - *

- * This will override the value specified by - * {@code hbase.client.scanner.caching}. - * Increasing this value will reduce the amount of work needed each time - * {@code next()} is called on a scanner, at the expense of memory use - * (since more rows will need to be maintained in memory by the scanners). - * @param scannerCaching the number of rows a scanner will fetch at once. - * @deprecated Use {@link Scan#setCaching(int)} - */ - @Deprecated - public void setScannerCaching(int scannerCaching) { - this.scannerCaching = scannerCaching; - } - /** * {@inheritDoc} */ @@ -625,11 +462,12 @@ public class HTable implements HTableInterface { * @throws IOException if a remote or network exception occurs * @deprecated This is no longer a public API. Use {@link #getAllRegionLocations()} instead. */ + @SuppressWarnings("deprecation") @Deprecated public NavigableMap getRegionLocations() throws IOException { // TODO: Odd that this returns a Map of HRI to SN whereas getRegionLocator, singular, // returns an HRegionLocation. - return MetaScanner.allTableRegions(this.connection, getName()); + return MetaTableAccessor.allTableRegions(this.connection, getName()); } /** @@ -775,7 +613,7 @@ public class HTable implements HTableInterface { throw new IllegalArgumentException("Small scan should not be used with batching"); } if (scan.getCaching() <= 0) { - scan.setCaching(getScannerCaching()); + scan.setCaching(scannerCaching); } if (scan.isReversed()) { @@ -1038,7 +876,15 @@ public class HTable implements HTableInterface { regionMutationBuilder.setAtomic(true); MultiRequest request = MultiRequest.newBuilder().addRegionAction(regionMutationBuilder.build()).build(); - getStub().multi(controller, request); + ClientProtos.MultiResponse response = getStub().multi(controller, request); + ClientProtos.RegionActionResult res = response.getRegionActionResultList().get(0); + if (res.hasException()) { + Throwable ex = ProtobufUtil.toException(res.getException()); + if(ex instanceof IOException) { + throw (IOException)ex; + } + throw new IOException("Failed to mutate row: "+Bytes.toStringBinary(rm.getRow()), ex); + } } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); } @@ -1317,6 +1163,15 @@ public class HTable implements HTableInterface { getLocation().getRegionInfo().getRegionName(), row, family, qualifier, new BinaryComparator(value), compareType, rm); ClientProtos.MultiResponse response = getStub().multi(controller, request); + ClientProtos.RegionActionResult res = response.getRegionActionResultList().get(0); + if (res.hasException()) { + Throwable ex = ProtobufUtil.toException(res.getException()); + if(ex instanceof IOException) { + throw (IOException)ex; + } + throw new IOException("Failed to checkAndMutate row: "+ + Bytes.toStringBinary(rm.getRow()), ex); + } return Boolean.valueOf(response.getProcessed()); } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); @@ -1438,6 +1293,7 @@ public class HTable implements HTableInterface { terminated = this.pool.awaitTermination(60, TimeUnit.SECONDS); } while (!terminated); } catch (InterruptedException e) { + this.pool.shutdownNow(); LOG.warn("waitForTermination interrupted"); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java index b6e6a522915..d37cf827b12 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java @@ -53,6 +53,8 @@ import org.apache.hadoop.hbase.util.ClassSize; public class Increment extends Mutation implements Comparable { private static final long HEAP_OVERHEAD = ClassSize.REFERENCE + ClassSize.TIMERANGE; + private static final String RETURN_RESULTS = "_rr_"; + private TimeRange tr = new TimeRange(); /** @@ -161,6 +163,24 @@ public class Increment extends Mutation implements Comparable { tr = new TimeRange(minStamp, maxStamp); return this; } + + /** + * @param returnResults True (default) if the increment operation should return the results. A + * client that is not interested in the result can save network bandwidth setting this + * to false. + */ + public Increment setReturnResults(boolean returnResults) { + setAttribute(RETURN_RESULTS, Bytes.toBytes(returnResults)); + return this; + } + + /** + * @return current value for returnResults + */ + public boolean isReturnResults() { + byte[] v = getAttribute(RETURN_RESULTS); + return v == null ? true : Bytes.toBoolean(v); + } /** * Method for retrieving the number of families to increment from diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java deleted file mode 100644 index 7d91dbbda99..00000000000 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java +++ /dev/null @@ -1,425 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.client; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.NavigableMap; -import java.util.TreeMap; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HRegionLocation; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.RegionLocations; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.ExceptionUtil; - -import com.google.common.annotations.VisibleForTesting; - -/** - * Scanner class that contains the hbase:meta table scanning logic. - * Provided visitors will be called for each row. - * - * Although public visibility, this is not a public-facing API and may evolve in - * minor releases. - * - *
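The Increment#setReturnResults / Increment#isReturnResults pair added in the hunk above can be used as sketched below; the Table handle, row key and column names are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Increment;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class IncrementWithoutResult {
      // Bumps a counter without shipping the updated cell back, saving network bandwidth.
      static void bump(Table table) throws IOException {
        Increment inc = new Increment(Bytes.toBytes("row1"));
        inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);
        inc.setReturnResults(false); // this client does not need the new value
        table.increment(inc);
      }
    }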

Note that during concurrent region splits, the scanner might not see - * hbase:meta changes across rows (for parent and daughter entries) consistently. - * see HBASE-5986, and {@link DefaultMetaScannerVisitor} for details.

- */ -@InterfaceAudience.Private -//TODO: merge this to MetaTableAccessor, get rid of it. -public final class MetaScanner { - private static final Log LOG = LogFactory.getLog(MetaScanner.class); - - private MetaScanner() {} - - /** - * Scans the meta table and calls a visitor on each RowResult and uses a empty - * start row value as table name. - * - *

Visible for testing. Use {@link - * #metaScan(Connection, MetaScannerVisitor, TableName)} instead. - * - * @param visitor A custom visitor - * @throws IOException e - */ - @VisibleForTesting // Do not use. Used by tests only and hbck. - public static void metaScan(Connection connection, - MetaScannerVisitor visitor) throws IOException { - metaScan(connection, visitor, null, null, Integer.MAX_VALUE); - } - - /** - * Scans the meta table and calls a visitor on each RowResult. Uses a table - * name to locate meta regions. - * - * @param connection connection to use internally (null to use a new instance) - * @param visitor visitor object - * @param userTableName User table name in meta table to start scan at. Pass - * null if not interested in a particular table. - * @throws IOException e - */ - public static void metaScan(Connection connection, - MetaScannerVisitor visitor, TableName userTableName) throws IOException { - metaScan(connection, visitor, userTableName, null, Integer.MAX_VALUE, - TableName.META_TABLE_NAME); - } - - /** - * Scans the meta table and calls a visitor on each RowResult. Uses a table - * name and a row name to locate meta regions. And it only scans at most - * rowLimit of rows. - * - *

Visible for testing. Use {@link - * #metaScan(Connection, MetaScannerVisitor, TableName)} instead. - * - * @param connection to scan on - * @param visitor Visitor object. - * @param userTableName User table name in meta table to start scan at. Pass - * null if not interested in a particular table. - * @param row Name of the row at the user table. The scan will start from - * the region row where the row resides. - * @param rowLimit Max of processed rows. If it is less than 0, it - * will be set to default value Integer.MAX_VALUE. - * @throws IOException e - */ - @VisibleForTesting // Do not use. Used by Master but by a method that is used testing. - public static void metaScan(Connection connection, - MetaScannerVisitor visitor, TableName userTableName, byte[] row, - int rowLimit) - throws IOException { - metaScan(connection, visitor, userTableName, row, rowLimit, TableName - .META_TABLE_NAME); - } - - /** - * Scans the meta table and calls a visitor on each RowResult. Uses a table - * name and a row name to locate meta regions. And it only scans at most - * rowLimit of rows. - * - * @param connection connection to use internally (null to use a new instance) - * @param visitor Visitor object. Closes the visitor before returning. - * @param tableName User table name in meta table to start scan at. Pass - * null if not interested in a particular table. - * @param row Name of the row at the user table. The scan will start from - * the region row where the row resides. - * @param rowLimit Max of processed rows. If it is less than 0, it - * will be set to default value Integer.MAX_VALUE. - * @param metaTableName Meta table to scan, root or meta. - * @throws IOException e - */ - static void metaScan(Connection connection, - final MetaScannerVisitor visitor, final TableName tableName, - final byte[] row, final int rowLimit, final TableName metaTableName) - throws IOException { - - int rowUpperLimit = rowLimit > 0 ? rowLimit: Integer.MAX_VALUE; - // Calculate startrow for scan. - byte[] startRow; - // If the passed in 'connection' is 'managed' -- i.e. every second test uses - // an HTable or an HBaseAdmin with managed connections -- then doing - // connection.getTable will throw an exception saying you are NOT to use - // managed connections getting tables. Leaving this as it is for now. Will - // revisit when inclined to change all tests. User code probaby makes use of - // managed connections too so don't change it till post hbase 1.0. 
- try (Table metaTable = new HTable(TableName.META_TABLE_NAME, connection, null)) { - if (row != null) { - // Scan starting at a particular row in a particular table - Result startRowResult = getClosestRowOrBefore(metaTable, tableName, row, - connection.getConfiguration().getBoolean(HConstants.USE_META_REPLICAS, - HConstants.DEFAULT_USE_META_REPLICAS)); - if (startRowResult == null) { - throw new TableNotFoundException("Cannot find row in " + metaTable.getName() + - " for table: " + tableName + ", row=" + Bytes.toStringBinary(row)); - } - HRegionInfo regionInfo = getHRegionInfo(startRowResult); - if (regionInfo == null) { - throw new IOException("HRegionInfo was null or empty in Meta for " + - tableName + ", row=" + Bytes.toStringBinary(row)); - } - byte[] rowBefore = regionInfo.getStartKey(); - startRow = HRegionInfo.createRegionName(tableName, rowBefore, HConstants.ZEROES, false); - } else if (tableName == null || tableName.getName().length == 0) { - // Full hbase:meta scan - startRow = HConstants.EMPTY_START_ROW; - } else { - // Scan hbase:meta for an entire table - startRow = HRegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, - HConstants.ZEROES, false); - } - final Scan scan = new Scan(startRow).addFamily(HConstants.CATALOG_FAMILY); - int scannerCaching = connection.getConfiguration() - .getInt(HConstants.HBASE_META_SCANNER_CACHING, - HConstants.DEFAULT_HBASE_META_SCANNER_CACHING); - if (connection.getConfiguration().getBoolean(HConstants.USE_META_REPLICAS, - HConstants.DEFAULT_USE_META_REPLICAS)) { - scan.setConsistency(Consistency.TIMELINE); - } - if (rowUpperLimit <= scannerCaching) { - scan.setSmall(true); - } - int rows = Math.min(rowLimit, scannerCaching); - scan.setCaching(rows); - if (LOG.isTraceEnabled()) { - LOG.trace("Scanning " + metaTableName.getNameAsString() + " starting at row=" + - Bytes.toStringBinary(startRow) + " for max=" + rowUpperLimit + " with caching=" + rows); - } - // Run the scan - try (ResultScanner resultScanner = metaTable.getScanner(scan)) { - Result result; - int processedRows = 0; - while ((result = resultScanner.next()) != null) { - if (visitor != null) { - if (!visitor.processRow(result)) break; - } - processedRows++; - if (processedRows >= rowUpperLimit) break; - } - } - } finally { - if (visitor != null) { - try { - visitor.close(); - } catch (Throwable t) { - ExceptionUtil.rethrowIfInterrupt(t); - LOG.debug("Got exception in closing the meta scanner visitor", t); - } - } - } - } - - /** - * @return Get closest metatable region row to passed row - * @throws IOException - */ - private static Result getClosestRowOrBefore(final Table metaTable, final TableName userTableName, - final byte [] row, boolean useMetaReplicas) - throws IOException { - byte[] searchRow = HRegionInfo.createRegionName(userTableName, row, HConstants.NINES, false); - Scan scan = Scan.createGetClosestRowOrBeforeReverseScan(searchRow); - if (useMetaReplicas) { - scan.setConsistency(Consistency.TIMELINE); - } - try (ResultScanner resultScanner = metaTable.getScanner(scan)) { - return resultScanner.next(); - } - } - - /** - * Returns HRegionInfo object from the column - * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog - * table Result. 
- * @param data a Result object from the catalog table scan - * @return HRegionInfo or null - * @deprecated Use {@link org.apache.hadoop.hbase.MetaTableAccessor#getRegionLocations(Result)} - */ - @Deprecated - public static HRegionInfo getHRegionInfo(Result data) { - return HRegionInfo.getHRegionInfo(data); - } - - /** - * Lists all of the regions currently in META. - * @param conf configuration - * @param connection to connect with - * @param offlined True if we are to include offlined regions, false and we'll - * leave out offlined regions from returned list. - * @return List of all user-space regions. - * @throws IOException - */ - @VisibleForTesting // And for hbck. - public static List listAllRegions(Configuration conf, Connection connection, - final boolean offlined) - throws IOException { - final List regions = new ArrayList(); - MetaScannerVisitor visitor = new MetaScannerVisitorBase() { - @Override - public boolean processRow(Result result) throws IOException { - if (result == null || result.isEmpty()) { - return true; - } - - RegionLocations locations = MetaTableAccessor.getRegionLocations(result); - if (locations == null) return true; - for (HRegionLocation loc : locations.getRegionLocations()) { - if (loc != null) { - HRegionInfo regionInfo = loc.getRegionInfo(); - // If region offline AND we are not to include offlined regions, return. - if (regionInfo.isOffline() && !offlined) continue; - regions.add(regionInfo); - } - } - return true; - } - }; - metaScan(connection, visitor); - return regions; - } - - /** - * Lists all of the table regions currently in META. - * @param conf - * @param offlined True if we are to include offlined regions, false and we'll - * leave out offlined regions from returned list. - * @return Map of all user-space regions to servers - * @throws IOException - * @deprecated Use {@link #allTableRegions(Connection, TableName)} instead - */ - @Deprecated - public static NavigableMap allTableRegions(Configuration conf, - Connection connection, final TableName tableName, boolean offlined) throws IOException { - return allTableRegions(connection, tableName); - } - - /** - * Lists all of the table regions currently in META. - * @param connection - * @param tableName - * @return Map of all user-space regions to servers - * @throws IOException - */ - public static NavigableMap allTableRegions( - Connection connection, final TableName tableName) throws IOException { - final NavigableMap regions = - new TreeMap(); - MetaScannerVisitor visitor = new TableMetaScannerVisitor(tableName) { - @Override - public boolean processRowInternal(Result result) throws IOException { - RegionLocations locations = MetaTableAccessor.getRegionLocations(result); - if (locations == null) return true; - for (HRegionLocation loc : locations.getRegionLocations()) { - if (loc != null) { - HRegionInfo regionInfo = loc.getRegionInfo(); - regions.put(new UnmodifyableHRegionInfo(regionInfo), loc.getServerName()); - } - } - return true; - } - }; - metaScan(connection, visitor, tableName); - return regions; - } - - /** - * Lists table regions and locations grouped by region range from META. 
- */ - public static List listTableRegionLocations(Configuration conf, - Connection connection, final TableName tableName) throws IOException { - final List regions = new ArrayList(); - MetaScannerVisitor visitor = new TableMetaScannerVisitor(tableName) { - @Override - public boolean processRowInternal(Result result) throws IOException { - RegionLocations locations = MetaTableAccessor.getRegionLocations(result); - if (locations == null) return true; - regions.add(locations); - return true; - } - }; - metaScan(connection, visitor, tableName); - return regions; - } - - /** - * Visitor class called to process each row of the hbase:meta table - */ - public interface MetaScannerVisitor extends Closeable { - /** - * Visitor method that accepts a RowResult and the meta region location. - * Implementations can return false to stop the region's loop if it becomes - * unnecessary for some reason. - * - * @param rowResult result - * @return A boolean to know if it should continue to loop in the region - * @throws IOException e - */ - boolean processRow(Result rowResult) throws IOException; - } - - public static abstract class MetaScannerVisitorBase implements MetaScannerVisitor { - @Override - public void close() throws IOException { - } - } - - /** - * A MetaScannerVisitor that skips offline regions and split parents - */ - public static abstract class DefaultMetaScannerVisitor - extends MetaScannerVisitorBase { - - public DefaultMetaScannerVisitor() { - super(); - } - - public abstract boolean processRowInternal(Result rowResult) throws IOException; - - @Override - public boolean processRow(Result rowResult) throws IOException { - HRegionInfo info = getHRegionInfo(rowResult); - if (info == null) { - return true; - } - - //skip over offline and split regions - if (!(info.isOffline() || info.isSplit())) { - return processRowInternal(rowResult); - } - return true; - } - } - - /** - * A MetaScannerVisitor for a table. Provides a consistent view of the table's - * hbase:meta entries during concurrent splits (see HBASE-5986 for details). This class - * does not guarantee ordered traversal of meta entries, and can block until the - * hbase:meta entries for daughters are available during splits. - */ - public static abstract class TableMetaScannerVisitor extends DefaultMetaScannerVisitor { - private TableName tableName; - - public TableMetaScannerVisitor(TableName tableName) { - super(); - this.tableName = tableName; - } - - @Override - public final boolean processRow(Result rowResult) throws IOException { - HRegionInfo info = getHRegionInfo(rowResult); - if (info == null) { - return true; - } - if (!(info.getTable().equals(tableName))) { - return false; - } - return super.processRow(rowResult); - } - } -} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java index b44803be3da..6d155ca243e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.util.Bytes; /** * Container for Actions (i.e. Get, Delete, or Put), which are grouped by - * regionName. Intended to be used with HConnectionManager.processBatch() + * regionName. Intended to be used with {@link AsyncProcess}. 
*/ @InterfaceAudience.Private public final class MultiAction { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java index b9d652d8a80..364783f9ed3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java @@ -137,9 +137,22 @@ public class Put extends Mutation implements HeapSize, Comparable { * @param qualifier column qualifier * @param value column value * @return this + * @deprecated Since 1.0.0. Use {@link #addColumn(byte[], byte[], byte[])} */ + @Deprecated public Put add(byte [] family, byte [] qualifier, byte [] value) { - return add(family, qualifier, this.ts, value); + return addColumn(family, qualifier, value); + } + + /** + * Add the specified column and value to this Put operation. + * @param family family name + * @param qualifier column qualifier + * @param value column value + * @return this + */ + public Put addColumn(byte [] family, byte [] qualifier, byte [] value) { + return addColumn(family, qualifier, this.ts, value); } /** @@ -167,8 +180,23 @@ public class Put extends Mutation implements HeapSize, Comparable { * @param ts version timestamp * @param value column value * @return this + * @deprecated Since 1.0.0. Use {@link #addColumn(byte[], byte[], long, byte[])} */ + @Deprecated public Put add(byte [] family, byte [] qualifier, long ts, byte [] value) { + return addColumn(family, qualifier, ts, value); + } + + /** + * Add the specified column and value, with the specified timestamp as + * its version to this Put operation. + * @param family family name + * @param qualifier column qualifier + * @param ts version timestamp + * @param value column value + * @return this + */ + public Put addColumn(byte [] family, byte [] qualifier, long ts, byte [] value) { if (ts < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + ts); } @@ -199,7 +227,6 @@ public class Put extends Mutation implements HeapSize, Comparable { * This expects that the underlying arrays won't change. It's intended * for usage internal HBase to and for advanced client applications. */ - @SuppressWarnings("unchecked") public Put addImmutable(byte[] family, byte[] qualifier, long ts, byte[] value, Tag[] tag) { List list = getCellList(family); KeyValue kv = createPutKeyValue(family, qualifier, ts, value, tag); @@ -233,8 +260,23 @@ public class Put extends Mutation implements HeapSize, Comparable { * @param ts version timestamp * @param value column value * @return this + * @deprecated Since 1.0.0. Use {@link Put#addColumn(byte[], ByteBuffer, long, ByteBuffer)} */ + @Deprecated public Put add(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value) { + return addColumn(family, qualifier, ts, value); + } + + /** + * Add the specified column and value, with the specified timestamp as + * its version to this Put operation. + * @param family family name + * @param qualifier column qualifier + * @param ts version timestamp + * @param value column value + * @return this + */ + public Put addColumn(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value) { if (ts < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. 
ts=" + ts); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java index 66dcdce1d72..189dbaa2c96 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java @@ -23,13 +23,17 @@ import java.io.InterruptedIOException; import java.net.ConnectException; import java.net.SocketTimeoutException; +import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.RegionMovedException; +import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; +import org.apache.hadoop.hbase.util.Bytes; /** * Similar to {@link RegionServerCallable} but for the AdminService interface. This service callable @@ -42,25 +46,39 @@ public abstract class RegionAdminServiceCallable implements RetryingCallable< protected final ClusterConnection connection; + protected final RpcControllerFactory rpcControllerFactory; + protected AdminService.BlockingInterface stub; protected HRegionLocation location; protected final TableName tableName; protected final byte[] row; + protected final int replicaId; protected final static int MIN_WAIT_DEAD_SERVER = 10000; - public RegionAdminServiceCallable(ClusterConnection connection, TableName tableName, byte[] row) { - this(connection, null, tableName, row); + public RegionAdminServiceCallable(ClusterConnection connection, + RpcControllerFactory rpcControllerFactory, TableName tableName, byte[] row) { + this(connection, rpcControllerFactory, null, tableName, row); } - public RegionAdminServiceCallable(ClusterConnection connection, HRegionLocation location, + public RegionAdminServiceCallable(ClusterConnection connection, + RpcControllerFactory rpcControllerFactory, HRegionLocation location, TableName tableName, byte[] row) { + this(connection, rpcControllerFactory, location, + tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID); + } + + public RegionAdminServiceCallable(ClusterConnection connection, + RpcControllerFactory rpcControllerFactory, HRegionLocation location, + TableName tableName, byte[] row, int replicaId) { this.connection = connection; + this.rpcControllerFactory = rpcControllerFactory; this.location = location; this.tableName = tableName; this.row = row; + this.replicaId = replicaId; } @Override @@ -85,7 +103,18 @@ public abstract class RegionAdminServiceCallable implements RetryingCallable< this.stub = stub; } - public abstract HRegionLocation getLocation(boolean useCache) throws IOException; + public HRegionLocation getLocation(boolean useCache) throws IOException { + RegionLocations rl = getRegionLocations(connection, tableName, row, useCache, replicaId); + if (rl == null) { + throw new HBaseIOException(getExceptionMessage()); + } + HRegionLocation location = rl.getRegionLocation(replicaId); + if (location == null) { + throw new HBaseIOException(getExceptionMessage()); + } + + return location; + } @Override public void throwable(Throwable t, boolean retrying) { @@ -115,7 +144,8 @@ public 
abstract class RegionAdminServiceCallable implements RetryingCallable< //subclasses can override this. protected String getExceptionMessage() { - return "There is no location"; + return "There is no location" + " table=" + tableName + + " ,replica=" + replicaId + ", row=" + Bytes.toStringBinary(row); } @Override @@ -132,4 +162,27 @@ public abstract class RegionAdminServiceCallable implements RetryingCallable< } return sleep; } + + public static RegionLocations getRegionLocations( + ClusterConnection connection, TableName tableName, byte[] row, + boolean useCache, int replicaId) + throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException { + RegionLocations rl; + try { + rl = connection.locateRegion(tableName, row, useCache, true, replicaId); + } catch (DoNotRetryIOException e) { + throw e; + } catch (RetriesExhaustedException e) { + throw e; + } catch (InterruptedIOException e) { + throw e; + } catch (IOException e) { + throw new RetriesExhaustedException("Can't get the location", e); + } + if (rl == null) { + throw new RetriesExhaustedException("Can't get the locations"); + } + + return rl; + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java index 801e1b972b2..c2dcbc04d13 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionReplicaUtil.java @@ -23,6 +23,7 @@ import java.util.Iterator; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.Bytes; /** * Utility methods which contain the logic for regions and replicas. @@ -30,6 +31,19 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; @InterfaceAudience.Private public class RegionReplicaUtil { + /** + * Whether or not the secondary region will wait for observing a flush / region open event + * from the primary region via async wal replication before enabling read requests. Since replayed + * edits from async wal replication from primary is not persisted in WAL, the memstore of the + * secondary region might be non-empty at the time of close or crash. For ensuring seqId's not + * "going back in time" in the secondary region replica, this should be enabled. However, in some + * cases the above semantics might be ok for some application classes. + * See HBASE-11580 for more context. + */ + public static final String REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY + = "hbase.region.replica.wait.for.primary.flush"; + protected static final boolean DEFAULT_REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH = true; + /** * The default replicaId for the region */ @@ -92,4 +106,46 @@ public class RegionReplicaUtil { } } } + + public static boolean isReplicasForSameRegion(HRegionInfo regionInfoA, HRegionInfo regionInfoB) { + return compareRegionInfosWithoutReplicaId(regionInfoA, regionInfoB) == 0; + } + + private static int compareRegionInfosWithoutReplicaId(HRegionInfo regionInfoA, + HRegionInfo regionInfoB) { + int result = regionInfoA.getTable().compareTo(regionInfoB.getTable()); + if (result != 0) { + return result; + } + + // Compare start keys. + result = Bytes.compareTo(regionInfoA.getStartKey(), regionInfoB.getStartKey()); + if (result != 0) { + return result; + } + + // Compare end keys. 
+ result = Bytes.compareTo(regionInfoA.getEndKey(), regionInfoB.getEndKey()); + + if (result != 0) { + if (regionInfoA.getStartKey().length != 0 + && regionInfoA.getEndKey().length == 0) { + return 1; // this is last region + } + if (regionInfoB.getStartKey().length != 0 + && regionInfoB.getEndKey().length == 0) { + return -1; // o is the last region + } + return result; + } + + // regionId is usually milli timestamp -- this defines older stamps + // to be "smaller" than newer stamps in sort order. + if (regionInfoA.getRegionId() > regionInfoB.getRegionId()) { + return 1; + } else if (regionInfoA.getRegionId() < regionInfoB.getRegionId()) { + return -1; + } + return 0; + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java index faef0d3aa97..5a9aff3eda2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client; +import java.io.IOException; import java.nio.BufferOverflowException; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -80,6 +82,17 @@ public class Result implements CellScannable, CellScanner { private Cell[] cells; private Boolean exists; // if the query was just to check existence. private boolean stale = false; + + /** + * Partial results do not contain the full row's worth of cells. The result had to be returned in + * parts because the size of the cells in the row exceeded the RPC result size on the server. + * Partial results must be combined client side with results representing the remainder of the + * row's cells to form the complete result. Partial results and RPC result size allow us to avoid + * OOME on the server when servicing requests for large rows. The Scan configuration used to + * control the result size on the server is {@link Scan#setMaxResultSize(long)} and the default + * value can be seen here: {@link HConstants#DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE} + */ + private boolean partial = false; // We're not using java serialization. Transient here is just a marker to say // that this is where we cache row if we're ever asked for it. private transient byte [] row = null; @@ -89,7 +102,7 @@ public class Result implements CellScannable, CellScanner { private static ThreadLocal localBuffer = new ThreadLocal(); private static final int PAD_WIDTH = 128; - public static final Result EMPTY_RESULT = new Result(); + public static final Result EMPTY_RESULT = new Result(true); private final static int INITIAL_CELLSCANNER_INDEX = -1; @@ -99,6 +112,8 @@ public class Result implements CellScannable, CellScanner { private int cellScannerIndex = INITIAL_CELLSCANNER_INDEX; private ClientProtos.RegionLoadStats stats; + private final boolean readonly; + /** * Creates an empty Result w/ no KeyValue payload; returns null if you call {@link #rawCells()}. 
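The hbase.region.replica.wait.for.primary.flush key introduced in RegionReplicaUtil above is an ordinary boolean configuration switch (default true). A minimal sketch of relaxing it, assuming an application that can tolerate the weaker seqId semantics described in HBASE-11580; the class name is invented for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ReplicaFlushConfigSketch {
  public static Configuration relaxedReplicaConf() {
    Configuration conf = HBaseConfiguration.create();
    // Let secondary replicas serve reads without first observing a flush from the primary.
    conf.setBoolean("hbase.region.replica.wait.for.primary.flush", false);
    return conf;
  }
}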
* Use this to represent no results if {@code null} won't do or in old 'mapred' as opposed @@ -106,7 +121,16 @@ public class Result implements CellScannable, CellScanner { * {@link #copyFrom(Result)} call. */ public Result() { - super(); + this(false); + } + + /** + * Allows to construct special purpose immutable Result objects, + * such as EMPTY_RESULT. + * @param readonly whether this Result instance is readonly + */ + private Result(boolean readonly) { + this.readonly = readonly; } /** @@ -115,7 +139,7 @@ public class Result implements CellScannable, CellScanner { * @param cells List of cells */ public static Result create(List cells) { - return new Result(cells.toArray(new Cell[cells.size()]), null, false); + return create(cells, null); } public static Result create(List cells, Boolean exists) { @@ -123,10 +147,14 @@ public class Result implements CellScannable, CellScanner { } public static Result create(List cells, Boolean exists, boolean stale) { + return create(cells, exists, stale, false); + } + + public static Result create(List cells, Boolean exists, boolean stale, boolean partial) { if (exists != null){ - return new Result(null, exists, stale); + return new Result(null, exists, stale, partial); } - return new Result(cells.toArray(new Cell[cells.size()]), null, stale); + return new Result(cells.toArray(new Cell[cells.size()]), null, stale, partial); } /** @@ -135,21 +163,27 @@ public class Result implements CellScannable, CellScanner { * @param cells array of cells */ public static Result create(Cell[] cells) { - return new Result(cells, null, false); + return create(cells, null, false); } public static Result create(Cell[] cells, Boolean exists, boolean stale) { + return create(cells, exists, stale, false); + } + + public static Result create(Cell[] cells, Boolean exists, boolean stale, boolean partial) { if (exists != null){ - return new Result(null, exists, stale); + return new Result(null, exists, stale, partial); } - return new Result(cells, null, stale); + return new Result(cells, null, stale, partial); } /** Private ctor. Use {@link #create(Cell[])}. */ - private Result(Cell[] cells, Boolean exists, boolean stale) { + private Result(Cell[] cells, Boolean exists, boolean stale, boolean partial) { this.cells = cells; this.exists = exists; this.stale = stale; + this.partial = partial; + this.readonly = false; } /** @@ -361,6 +395,9 @@ public class Result implements CellScannable, CellScanner { /** * Get the latest version of the specified column. + * Note: this call clones the value content of the hosting Cell. See + * {@link #getValueAsByteBuffer(byte[], byte[])}, etc., or {@link #listCells()} if you would + * avoid the cloning. * @param family family name * @param qualifier column qualifier * @return value of latest version of column, null if none found @@ -388,7 +425,8 @@ public class Result implements CellScannable, CellScanner { if (kv == null) { return null; } - return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); + return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()). + asReadOnlyBuffer(); } /** @@ -411,7 +449,8 @@ public class Result implements CellScannable, CellScanner { if (kv == null) { return null; } - return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); + return ByteBuffer.wrap(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()). 
+ asReadOnlyBuffer(); } /** @@ -741,7 +780,59 @@ public class Result implements CellScannable, CellScanner { } /** - * Get total size of raw cells + * Forms a single result from the partial results in the partialResults list. This method is + * useful for reconstructing partial results on the client side. + * @param partialResults list of partial results + * @return The complete result that is formed by combining all of the partial results together + * @throws IOException A complete result cannot be formed because the results in the partial list + * come from different rows + */ + public static Result createCompleteResult(List partialResults) + throws IOException { + List cells = new ArrayList(); + boolean stale = false; + byte[] prevRow = null; + byte[] currentRow = null; + + if (partialResults != null && !partialResults.isEmpty()) { + for (int i = 0; i < partialResults.size(); i++) { + Result r = partialResults.get(i); + currentRow = r.getRow(); + if (prevRow != null && !Bytes.equals(prevRow, currentRow)) { + throw new IOException( + "Cannot form complete result. Rows of partial results do not match." + + " Partial Results: " + partialResults); + } + + // Ensure that all Results except the last one are marked as partials. The last result + // may not be marked as a partial because Results are only marked as partials when + // the scan on the server side must be stopped due to reaching the maxResultSize. + // Visualizing it makes it easier to understand: + // maxResultSize: 2 cells + // (-x-) represents cell number x in a row + // Example: row1: -1- -2- -3- -4- -5- (5 cells total) + // How row1 will be returned by the server as partial Results: + // Result1: -1- -2- (2 cells, size limit reached, mark as partial) + // Result2: -3- -4- (2 cells, size limit reached, mark as partial) + // Result3: -5- (1 cell, size limit NOT reached, NOT marked as partial) + if (i != (partialResults.size() - 1) && !r.isPartial()) { + throw new IOException( + "Cannot form complete result. Result is missing partial flag. " + + "Partial Results: " + partialResults); + } + prevRow = currentRow; + stale = stale || r.isStale(); + for (Cell c : r.rawCells()) { + cells.add(c); + } + } + } + + return Result.create(cells, null, stale); + } + + /** + * Get total size of raw cells * @param result * @return Total size. */ @@ -755,9 +846,12 @@ public class Result implements CellScannable, CellScanner { /** * Copy another Result into this one. Needed for the old Mapred framework + * @throws UnsupportedOperationException if invoked on instance of EMPTY_RESULT + * (which is supposed to be immutable). * @param other */ public void copyFrom(Result other) { + checkReadonly(); this.row = null; this.familyMap = null; this.cells = other.cells; @@ -787,6 +881,7 @@ public class Result implements CellScannable, CellScanner { } public void setExists(Boolean exists) { + checkReadonly(); this.exists = exists; } @@ -799,11 +894,34 @@ public class Result implements CellScannable, CellScanner { return stale; } + /** + * Whether or not the result is a partial result. Partial results contain a subset of the cells + * for a row and should be combined with a result representing the remaining cells in that row to + * form a complete (non-partial) result. 
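The partial-result plumbing above can be driven from the client side. A minimal sketch, assuming a ResultScanner obtained from a Scan that opted in via setAllowPartialResults(true); the class and method names are invented for illustration:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;

public class PartialResultStitcher {
  /** Collects row fragments until a non-partial Result arrives, then combines them. */
  public static List<Result> stitch(ResultScanner scanner) throws IOException {
    List<Result> wholeRows = new ArrayList<Result>();
    List<Result> fragments = new ArrayList<Result>();
    for (Result r = scanner.next(); r != null; r = scanner.next()) {
      fragments.add(r);
      if (!r.isPartial()) {                 // last fragment of the current row
        wholeRows.add(Result.createCompleteResult(fragments));
        fragments.clear();
      }
    }
    return wholeRows;
  }
}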
+ * @return Whether or not the result is a partial result + */ + public boolean isPartial() { + return partial; + } + /** * Add load information about the region to the information about the result * @param loadStats statistics about the current region from which this was returned + * @deprecated use {@link #setStatistics(ClientProtos.RegionLoadStats)} instead + * @throws UnsupportedOperationException if invoked on instance of EMPTY_RESULT + * (which is supposed to be immutable). */ + @Deprecated public void addResults(ClientProtos.RegionLoadStats loadStats) { + checkReadonly(); + this.stats = loadStats; + } + + /** + * Set load information about the region to the information about the result + * @param loadStats statistics about the current region from which this was returned + */ + public void setStatistics(ClientProtos.RegionLoadStats loadStats) { this.stats = loadStats; } @@ -814,4 +932,14 @@ public class Result implements CellScannable, CellScanner { public ClientProtos.RegionLoadStats getStats() { return stats; } + + /** + * All methods modifying state of Result object must call this method + * to ensure that special purpose immutable Results can't be accidentally modified. + */ + private void checkReadonly() { + if (readonly == true) { + throw new UnsupportedOperationException("Attempting to modify readonly EMPTY_RESULT!"); + } + } } \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorFactory.java index 3fa4f64a90c..15475f8462d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorFactory.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.classification.InterfaceAudience; /** - * Factory implementation to provide the {@link HConnectionImplementation} with + * Factory implementation to provide the {@link ConnectionImplementation} with * the implementation of the {@link RetryingCallerInterceptor} that we would use * to intercept the {@link RpcRetryingCaller} during the course of their calls. 
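As a quick illustration of the read-only guard added to Result (a hypothetical snippet, not part of this patch): any mutator invoked on the shared EMPTY_RESULT now fails fast.

import org.apache.hadoop.hbase.client.Result;

public class EmptyResultGuardDemo {
  public static void main(String[] args) {
    try {
      Result.EMPTY_RESULT.setExists(Boolean.TRUE);   // routes through checkReadonly()
    } catch (UnsupportedOperationException expected) {
      System.out.println("EMPTY_RESULT is immutable: " + expected.getMessage());
    }
  }
}

Ordinary Result instances are unaffected; they should simply prefer setStatistics(...) over the now-deprecated addResults(...).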
* diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java index 0f244e03e60..ef4b89d6b46 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; -import java.util.Arrays; import java.util.concurrent.ExecutorService; import org.apache.commons.logging.Log; @@ -38,9 +37,6 @@ import org.apache.hadoop.hbase.util.ExceptionUtil; @InterfaceAudience.Private public class ReversedClientScanner extends ClientScanner { private static final Log LOG = LogFactory.getLog(ReversedClientScanner.class); - // A byte array in which all elements are the max byte, and it is used to - // construct closest front row - static byte[] MAX_BYTE_ARRAY = Bytes.createMaxByteArray(9); /** * Create a new ReversibleClientScanner for the specified table Note that the @@ -139,9 +135,10 @@ public class ReversedClientScanner extends ClientScanner { new ReversedScannerCallable(getConnection(), getTable(), scan, this.scanMetrics, locateStartRow, this.rpcControllerFactory); s.setCaching(nbRows); - ScannerCallableWithReplicas sr = new ScannerCallableWithReplicas(getTable(), getConnection(), - s, pool, primaryOperationTimeout, scan, - getRetries(), getScannerTimeout(), caching, getConf(), caller); + ScannerCallableWithReplicas sr = + new ScannerCallableWithReplicas(getTable(), getConnection(), s, pool, + primaryOperationTimeout, scan, getRetries(), getScannerTimeout(), caching, getConf(), + caller); return sr; } @@ -161,26 +158,4 @@ public class ReversedClientScanner extends ClientScanner { } return false; // unlikely. } - - /** - * Create the closest row before the specified row - * @param row - * @return a new byte array which is the closest front row of the specified one - */ - protected static byte[] createClosestRowBefore(byte[] row) { - if (row == null) { - throw new IllegalArgumentException("The passed row is empty"); - } - if (Bytes.equals(row, HConstants.EMPTY_BYTE_ARRAY)) { - return MAX_BYTE_ARRAY; - } - if (row[row.length - 1] == 0) { - return Arrays.copyOf(row, row.length - 1); - } else { - byte[] closestFrontRow = Arrays.copyOf(row, row.length); - closestFrontRow[row.length - 1] = (byte) ((closestFrontRow[row.length - 1] & 0xff) - 1); - closestFrontRow = Bytes.add(closestFrontRow, MAX_BYTE_ARRAY); - return closestFrontRow; - } - } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java index 4a57adf5f05..0c2d3459d57 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java @@ -129,8 +129,6 @@ public class ReversedScannerCallable extends ScannerCallable { } // check how often we retry. - // HConnectionManager will call instantiateServer with reload==true - // if and only if for retries. 
if (reload && this.scanMetrics != null) { this.scanMetrics.countOfRPCRetries.incrementAndGet(); if (isRegionServerRemote) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java index 1d037bc7b9b..03138ec8c4d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java @@ -54,10 +54,7 @@ public class RpcRetryingCallerImpl implements RpcRetryingCaller { * When we started making calls. */ private long globalStartTime; - /** - * Start and end times for a single call. - */ - private final static int MIN_RPC_TIMEOUT = 2000; + /** How many retries are allowed before we start to log */ private final int startLogErrorsCnt; @@ -87,11 +84,11 @@ public class RpcRetryingCallerImpl implements RpcRetryingCaller { if (callTimeout == Integer.MAX_VALUE) return Integer.MAX_VALUE; int remainingTime = (int) (callTimeout - (EnvironmentEdgeManager.currentTime() - this.globalStartTime)); - if (remainingTime < MIN_RPC_TIMEOUT) { + if (remainingTime < 1) { // If there is no time left, we're trying anyway. It's too late. // 0 means no timeout, and it's not the intent here. So we secure both cases by // resetting to the minimum. - remainingTime = MIN_RPC_TIMEOUT; + remainingTime = 1; } return remainingTime; } @@ -222,7 +219,7 @@ public class RpcRetryingCallerImpl implements RpcRetryingCaller { } // Don't let ServiceException out; its rpc specific. t = cause; - // t could be a RemoteException so go aaround again. + // t could be a RemoteException so go around again. translateException(t); } else if (t instanceof DoNotRetryIOException) { throw (DoNotRetryIOException)t; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index d2dd770c03a..3b6194f99b6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -34,9 +34,11 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.IncompatibleFilterException; import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.util.Bytes; @@ -91,35 +93,40 @@ public class Scan extends Query { private static final String RAW_ATTR = "_raw_"; - /** - * EXPERT ONLY. - * An integer (not long) indicating to the scanner logic how many times we attempt to retrieve the - * next KV before we schedule a reseek. - * The right value depends on the size of the average KV. A reseek is more efficient when - * it can skip 5-10 KVs or 512B-1KB, or when the next KV is likely found in another HFile block. - * Setting this only has any effect when columns were added with - * {@link #addColumn(byte[], byte[])} - *

-   * <pre>{@code
-   * Scan s = new Scan(...);
-   * s.addColumn(...);
-   * s.setAttribute(Scan.HINT_LOOKAHEAD, Bytes.toBytes(2));
-   * }</pre>
- * Default is 0 (always reseek). - */ - public static final String HINT_LOOKAHEAD = "_look_ahead_"; - private byte [] startRow = HConstants.EMPTY_START_ROW; private byte [] stopRow = HConstants.EMPTY_END_ROW; private int maxVersions = 1; private int batch = -1; + /** + * Partial {@link Result}s are {@link Result}s must be combined to form a complete {@link Result}. + * The {@link Result}s had to be returned in fragments (i.e. as partials) because the size of the + * cells in the row exceeded max result size on the server. Typically partial results will be + * combined client side into complete results before being delivered to the caller. However, if + * this flag is set, the caller is indicating that they do not mind seeing partial results (i.e. + * they understand that the results returned from the Scanner may only represent part of a + * particular row). In such a case, any attempt to combine the partials into a complete result on + * the client side will be skipped, and the caller will be able to see the exact results returned + * from the server. + */ + private boolean allowPartialResults = false; + private int storeLimit = -1; private int storeOffset = 0; private boolean getScan; - // If application wants to collect scan metrics, it needs to - // call scan.setAttribute(SCAN_ATTRIBUTES_ENABLE, Bytes.toBytes(Boolean.TRUE)) + /** + * @deprecated since 1.0.0. Use {@link #setScanMetricsEnabled(boolean)} + */ + // Make private or remove. + @Deprecated static public final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable"; + + /** + * Use {@link #getScanMetrics()} + */ + // Make this private or remove. + @Deprecated static public final String SCAN_ATTRIBUTES_METRICS_DATA = "scan.attributes.metrics.data"; // If an application wants to use multiple scans over different tables each scan must @@ -680,6 +687,27 @@ public class Scan extends Query { return reversed; } + /** + * Setting whether the caller wants to see the partial results that may be returned from the + * server. By default this value is false and the complete results will be assembled client side + * before being delivered to the caller. + * @param allowPartialResults + * @return this + */ + public Scan setAllowPartialResults(final boolean allowPartialResults) { + this.allowPartialResults = allowPartialResults; + return this; + } + + /** + * @return true when the constructor of this scan understands that the results they will see may + * only represent a partial portion of a row. The entire row would be retrieved by + * subsequent calls to {@link ResultScanner#next()} + */ + public boolean getAllowPartialResults() { + return allowPartialResults; + } + /** * Set the value indicating whether loading CFs on demand should be allowed (cluster * default is false). On-demand CF loading doesn't load column families until necessary, e.g. @@ -916,4 +944,31 @@ public class Scan extends Query { scan.setCaching(1); return scan; } + + /** + * Enable collection of {@link ScanMetrics}. For advanced users. + * @param enabled Set to true to enable accumulating scan metrics + */ + public Scan setScanMetricsEnabled(final boolean enabled) { + setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.valueOf(enabled))); + return this; + } + + /** + * @return True if collection of scan metrics is enabled. For advanced users. + */ + public boolean isScanMetricsEnabled() { + byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE); + return attr == null ? 
false : Bytes.toBoolean(attr); + } + + /** + * @return Metrics on this Scan, if metrics were enabled. + * @see #setScanMetricsEnabled(boolean) + */ + public ScanMetrics getScanMetrics() { + byte [] bytes = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA); + if (bytes == null) return null; + return ProtobufUtil.toScanMetrics(bytes); + } } \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java index 6d5bb9ea670..714c9fe7783 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java @@ -76,6 +76,8 @@ public class ScannerCallable extends RegionServerCallable { private int logCutOffLatency = 1000; private static String myAddress; protected final int id; + protected boolean serverHasMoreResultsContext; + protected boolean serverHasMoreResults; static { try { myAddress = DNS.getDefaultHost("default", "default"); @@ -153,8 +155,6 @@ public class ScannerCallable extends RegionServerCallable { } // check how often we retry. - // HConnectionManager will call instantiateServer with reload==true - // if and only if for retries. if (reload && this.scanMetrics != null) { this.scanMetrics.countOfRPCRetries.incrementAndGet(); if (isRegionServerRemote) { @@ -177,7 +177,6 @@ public class ScannerCallable extends RegionServerCallable { @Override - @SuppressWarnings("deprecation") public Result [] call(int callTimeout) throws IOException { if (Thread.interrupted()) { throw new InterruptedIOException(); @@ -223,12 +222,23 @@ public class ScannerCallable extends RegionServerCallable { + rows + " rows from scanner=" + scannerId); } } - if (response.hasMoreResults() - && !response.getMoreResults()) { + // moreResults is only used for the case where a filter exhausts all elements + if (response.hasMoreResults() && !response.getMoreResults()) { scannerId = -1L; closed = true; + // Implied that no results were returned back, either. return null; } + // moreResultsInRegion explicitly defines when a RS may choose to terminate a batch due + // to size or quantity of results in the response. + if (response.hasMoreResultsInRegion()) { + // Set what the RS said + setHasMoreResultsContext(true); + setServerHasMoreResults(response.getMoreResultsInRegion()); + } else { + // Server didn't respond whether it has more results or not. + setHasMoreResultsContext(false); + } } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); } @@ -394,4 +404,30 @@ public class ScannerCallable extends RegionServerCallable { s.setCaching(this.caching); return s; } + + /** + * Should the client attempt to fetch more results from this region + * @return True if the client should attempt to fetch more results, false otherwise. + */ + protected boolean getServerHasMoreResults() { + assert serverHasMoreResultsContext; + return this.serverHasMoreResults; + } + + protected void setServerHasMoreResults(boolean serverHasMoreResults) { + this.serverHasMoreResults = serverHasMoreResults; + } + + /** + * Did the server respond with information about whether more results might exist. + * Not guaranteed to respond with older server versions + * @return True if the server responded with information about more results. 
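Pulling the new Scan-level switches together (allowPartialResults, the scan-metrics toggle, and the metrics accessor), a hypothetical caller might configure a scan as below; table setup is assumed and the class name is invented:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;

public class ScanOptionsSketch {
  public static void scanWithOptions(Table table) throws IOException {
    Scan scan = new Scan();
    scan.setMaxResultSize(2L * 1024 * 1024);  // cap per-RPC payload; rows may arrive in pieces
    scan.setAllowPartialResults(true);        // caller accepts row fragments as-is
    scan.setScanMetricsEnabled(true);         // replaces setting SCAN_ATTRIBUTES_METRICS_ENABLE by hand
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        // r.isPartial() reports whether this Result is only a fragment of its row
      }
    }
    ScanMetrics metrics = scan.getScanMetrics();  // populated once the scan has run, may be null
    System.out.println("scan metrics collected: " + (metrics != null));
  }
}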
+ */ + protected boolean hasMoreResultsContext() { + return serverHasMoreResultsContext; + } + + protected void setHasMoreResultsContext(boolean serverHasMoreResultsContext) { + this.serverHasMoreResultsContext = serverHasMoreResultsContext; + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java index 0697840f19e..7ba152bd8b5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallableWithReplicas.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hbase.client; +import static org.apache.hadoop.hbase.client.ClientScanner.createClosestRowBefore; + import java.io.IOException; import java.io.InterruptedIOException; import java.util.ArrayList; @@ -44,8 +46,6 @@ import org.apache.hadoop.hbase.util.Pair; import com.google.common.annotations.VisibleForTesting; -import static org.apache.hadoop.hbase.client.ReversedClientScanner.createClosestRowBefore; - /** * This class has the logic for handling scanners for regions with and without replicas. * 1. A scan is attempted on the default (primary) region @@ -111,6 +111,22 @@ class ScannerCallableWithReplicas implements RetryingCallable { return currentScannerCallable.getHRegionInfo(); } + public boolean getServerHasMoreResults() { + return currentScannerCallable.getServerHasMoreResults(); + } + + public void setServerHasMoreResults(boolean serverHasMoreResults) { + currentScannerCallable.setServerHasMoreResults(serverHasMoreResults); + } + + public boolean hasMoreResultsContext() { + return currentScannerCallable.hasMoreResultsContext(); + } + + public void setHasMoreResultsContext(boolean serverHasMoreResultsContext) { + currentScannerCallable.setHasMoreResultsContext(serverHasMoreResultsContext); + } + @Override public Result [] call(int timeout) throws IOException { // If the active replica callable was closed somewhere, invoke the RPC to @@ -276,14 +292,7 @@ class ScannerCallableWithReplicas implements RetryingCallable { continue; //this was already scheduled earlier } ScannerCallable s = currentScannerCallable.getScannerCallableForReplica(id); - - if (this.lastResult != null) { - if(s.getScan().isReversed()){ - s.getScan().setStartRow(createClosestRowBefore(this.lastResult.getRow())); - }else { - s.getScan().setStartRow(Bytes.add(this.lastResult.getRow(), new byte[1])); - } - } + setStartRowForReplicaCallable(s); outstandingCallables.add(s); RetryingRPC retryingOnReplica = new RetryingRPC(s); cs.submit(retryingOnReplica, scannerTimeout, id); @@ -291,6 +300,31 @@ class ScannerCallableWithReplicas implements RetryingCallable { return max - min + 1; } + /** + * Set the start row for the replica callable based on the state of the last result received. + * @param callable The callable to set the start row on + */ + private void setStartRowForReplicaCallable(ScannerCallable callable) { + if (this.lastResult == null || callable == null) return; + + if (this.lastResult.isPartial()) { + // The last result was a partial result which means we have not received all of the cells + // for this row. Thus, use the last result's row as the start row. If a replica switch + // occurs, the scanner will ensure that any accumulated partial results are cleared, + // and the scan can resume from this row. 
+ callable.getScan().setStartRow(this.lastResult.getRow()); + } else { + // The last result was not a partial result which means it contained all of the cells for + // that row (we no longer need any information from it). Set the start row to the next + // closest row that could be seen. + if (callable.getScan().isReversed()) { + callable.getScan().setStartRow(createClosestRowBefore(this.lastResult.getRow())); + } else { + callable.getScan().setStartRow(Bytes.add(this.lastResult.getRow(), new byte[1])); + } + } + } + @VisibleForTesting boolean isAnyRPCcancelled() { return someRPCcancelled; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java index 55fb1c49d0c..498c58707df 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java @@ -498,7 +498,9 @@ public interface Table extends Closeable { * The default value comes from the configuration parameter * {@code hbase.client.write.buffer}. * @return The size of the write buffer in bytes. + * @deprecated as of 1.0.1 (should not have been in 1.0.0). Replaced by {@link BufferedMutator#getWriteBufferSize()} */ + @Deprecated long getWriteBufferSize(); /** @@ -508,7 +510,10 @@ public interface Table extends Closeable { * write buffer, the buffer gets flushed. * @param writeBufferSize The new write buffer size, in bytes. * @throws IOException if a remote or network exception occurs. + * @deprecated as of 1.0.1 (should not have been in 1.0.0). Replaced by {@link BufferedMutator} and + * {@link BufferedMutatorParams#writeBufferSize(long)} */ + @Deprecated void setWriteBufferSize(long writeBufferSize) throws IOException; /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java index 04fd20fcc92..04d4b41262a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperKeepAliveConnection.java @@ -39,14 +39,14 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; class ZooKeeperKeepAliveConnection extends ZooKeeperWatcher{ ZooKeeperKeepAliveConnection( Configuration conf, String descriptor, - ConnectionManager.HConnectionImplementation conn) throws IOException { + ConnectionImplementation conn) throws IOException { super(conf, descriptor, conn); } @Override public void close() { if (this.abortable != null) { - ((ConnectionManager.HConnectionImplementation)abortable).releaseZooKeeperWatcher(this); + ((ConnectionImplementation)abortable).releaseZooKeeperWatcher(this); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java index 26aca1858fa..8cc7160267e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java @@ -37,14 +37,14 @@ import org.apache.zookeeper.KeeperException; class ZooKeeperRegistry implements Registry { static final Log LOG = LogFactory.getLog(ZooKeeperRegistry.class); // Needs an instance of hci to function. Set after construct this instance. 
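The start-row arithmetic used when a replica scan resumes has a simple shape; the following standalone illustration uses an invented row value:

import org.apache.hadoop.hbase.util.Bytes;

public class ResumeRowSketch {
  public static void main(String[] args) {
    byte[] lastRow = Bytes.toBytes("row-0042");
    // Forward scan: appending a single 0x00 byte produces the smallest key that sorts
    // strictly after lastRow, so the already-delivered row is not fetched again.
    byte[] forwardResume = Bytes.add(lastRow, new byte[1]);
    System.out.println(Bytes.compareTo(forwardResume, lastRow) > 0);  // true
    // Reversed scans rely on createClosestRowBefore(lastRow), now hosted in ClientScanner,
    // to play the mirror-image role.
  }
}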
- ConnectionManager.HConnectionImplementation hci; + ConnectionImplementation hci; @Override public void init(Connection connection) { - if (!(connection instanceof ConnectionManager.HConnectionImplementation)) { - throw new RuntimeException("This registry depends on HConnectionImplementation"); + if (!(connection instanceof ConnectionImplementation)) { + throw new RuntimeException("This registry depends on ConnectionImplementation"); } - this.hci = (ConnectionManager.HConnectionImplementation)connection; + this.hci = (ConnectionImplementation)connection; } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java index 86bc120d4a5..35c66678c93 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ScanMetrics.java @@ -22,15 +22,14 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import com.google.common.collect.ImmutableMap; /** - * Provides client-side metrics related to scan operations + * Provides client-side metrics related to scan operations. * The data can be passed to mapreduce framework or other systems. * We use atomic longs so that one thread can increment, * while another atomically resets to zero after the values are reported @@ -40,12 +39,10 @@ import com.google.common.collect.ImmutableMap; * However, there is no need for this. So they are defined under scan operation * for now. */ -@InterfaceAudience.Private +@InterfaceAudience.Public +@InterfaceStability.Evolving public class ScanMetrics { - - private static final Log LOG = LogFactory.getLog(ScanMetrics.class); - /** * Hash to hold the String -> Atomic Long mappings. 
*/ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java index 2d5c5e9e63f..c8b8b473477 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java @@ -38,17 +38,25 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationFactory; +import org.apache.hadoop.hbase.replication.ReplicationPeer; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; +import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl; import org.apache.hadoop.hbase.replication.ReplicationPeers; import org.apache.hadoop.hbase.replication.ReplicationQueuesClient; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; +import org.apache.zookeeper.data.Stat; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; @@ -114,16 +122,31 @@ public class ReplicationAdmin implements Closeable { "enable it in order to use replication"); } this.connection = ConnectionFactory.createConnection(conf); - zkw = createZooKeeperWatcher(); try { - this.replicationPeers = ReplicationFactory.getReplicationPeers(zkw, conf, this.connection); - this.replicationPeers.init(); - this.replicationQueuesClient = - ReplicationFactory.getReplicationQueuesClient(zkw, conf, this.connection); - this.replicationQueuesClient.init(); - - } catch (ReplicationException e) { - throw new IOException("Error initializing the replication admin client.", e); + zkw = createZooKeeperWatcher(); + try { + this.replicationPeers = ReplicationFactory.getReplicationPeers(zkw, conf, this.connection); + this.replicationPeers.init(); + this.replicationQueuesClient = + ReplicationFactory.getReplicationQueuesClient(zkw, conf, this.connection); + this.replicationQueuesClient.init(); + } catch (Exception exception) { + if (zkw != null) { + zkw.close(); + } + throw exception; + } + } catch (Exception exception) { + if (connection != null) { + connection.close(); + } + if (exception instanceof IOException) { + throw (IOException) exception; + } else if (exception instanceof RuntimeException) { + throw (RuntimeException) exception; + } else { + throw new IOException("Error initializing the replication admin client.", exception); + } } } @@ -501,4 +524,203 @@ public class ReplicationAdmin implements Closeable { return replicationColFams; } + + /** + * Enable a table's replication switch. 
+ * @param tableName name of the table + * @throws IOException if a remote or network exception occurs + */ + public void enableTableRep(final TableName tableName) throws IOException { + if (tableName == null) { + throw new IllegalArgumentException("Table name cannot be null"); + } + try (Admin admin = this.connection.getAdmin()) { + if (!admin.tableExists(tableName)) { + throw new TableNotFoundException("Table '" + tableName.getNameAsString() + + "' does not exists."); + } + } + byte[][] splits = getTableSplitRowKeys(tableName); + checkAndSyncTableDescToPeers(tableName, splits); + setTableRep(tableName, true); + } + + /** + * Disable a table's replication switch. + * @param tableName name of the table + * @throws IOException if a remote or network exception occurs + */ + public void disableTableRep(final TableName tableName) throws IOException { + if (tableName == null) { + throw new IllegalArgumentException("Table name is null"); + } + try (Admin admin = this.connection.getAdmin()) { + if (!admin.tableExists(tableName)) { + throw new TableNotFoundException("Table '" + tableName.getNamespaceAsString() + + "' does not exists."); + } + } + setTableRep(tableName, false); + } + + /** + * Get the split row keys of table + * @param tableName table name + * @return array of split row keys + * @throws IOException + */ + private byte[][] getTableSplitRowKeys(TableName tableName) throws IOException { + try (RegionLocator locator = connection.getRegionLocator(tableName);) { + byte[][] startKeys = locator.getStartKeys(); + if (startKeys.length == 1) { + return null; + } + byte[][] splits = new byte[startKeys.length - 1][]; + for (int i = 1; i < startKeys.length; i++) { + splits[i - 1] = startKeys[i]; + } + return splits; + } + } + + /** + * Connect to peer and check the table descriptor on peer: + *
+   * <ol>
+   * <li>Create the same table on the peer when it does not exist.</li>
+   * <li>Throw an exception if the table exists on the peer cluster but the descriptors are not
+   * the same.</li>
+   * </ol>
+ * @param tableName name of the table to sync to the peer + * @param splits table split keys + * @throws IOException + */ + private void checkAndSyncTableDescToPeers(final TableName tableName, final byte[][] splits) + throws IOException { + List repPeers = listValidReplicationPeers(); + if (repPeers == null || repPeers.size() <= 0) { + throw new IllegalArgumentException("Found no peer cluster for replication."); + } + for (ReplicationPeer repPeer : repPeers) { + Configuration peerConf = repPeer.getConfiguration(); + HTableDescriptor htd = null; + try (Connection conn = ConnectionFactory.createConnection(peerConf); + Admin admin = this.connection.getAdmin(); + Admin repHBaseAdmin = conn.getAdmin()) { + htd = admin.getTableDescriptor(tableName); + HTableDescriptor peerHtd = null; + if (!repHBaseAdmin.tableExists(tableName)) { + repHBaseAdmin.createTable(htd, splits); + } else { + peerHtd = repHBaseAdmin.getTableDescriptor(tableName); + if (peerHtd == null) { + throw new IllegalArgumentException("Failed to get table descriptor for table " + + tableName.getNameAsString() + " from peer cluster " + repPeer.getId()); + } else if (!peerHtd.equals(htd)) { + throw new IllegalArgumentException("Table " + tableName.getNameAsString() + + " exists in peer cluster " + repPeer.getId() + + ", but the table descriptors are not same when comapred with source cluster." + + " Thus can not enable the table's replication switch."); + } + } + } + } + } + + private List listValidReplicationPeers() { + Map peers = listPeerConfigs(); + if (peers == null || peers.size() <= 0) { + return null; + } + List validPeers = new ArrayList(peers.size()); + for (Entry peerEntry : peers.entrySet()) { + String peerId = peerEntry.getKey(); + String clusterKey = peerEntry.getValue().getClusterKey(); + Configuration peerConf = new Configuration(this.connection.getConfiguration()); + Stat s = null; + try { + ZKUtil.applyClusterKeyToConf(peerConf, clusterKey); + Pair pair = this.replicationPeers.getPeerConf(peerId); + ReplicationPeer peer = new ReplicationPeerZKImpl(peerConf, peerId, pair.getFirst()); + s = + zkw.getRecoverableZooKeeper().exists(peerConf.get(HConstants.ZOOKEEPER_ZNODE_PARENT), + null); + if (null == s) { + LOG.info(peerId + ' ' + clusterKey + " is invalid now."); + continue; + } + validPeers.add(peer); + } catch (ReplicationException e) { + LOG.warn("Failed to get valid replication peers. " + + "Error connecting to peer cluster with peerId=" + peerId); + LOG.debug("Failure details to get valid replication peers.", e); + continue; + } catch (KeeperException e) { + LOG.warn("Failed to get valid replication peers. KeeperException code=" + + e.code().intValue()); + LOG.debug("Failure details to get valid replication peers.", e); + continue; + } catch (InterruptedException e) { + LOG.warn("Failed to get valid replication peers due to InterruptedException."); + LOG.debug("Failure details to get valid replication peers.", e); + Thread.currentThread().interrupt(); + continue; + } catch (IOException e) { + LOG.warn("Failed to get valid replication peers due to IOException."); + LOG.debug("Failure details to get valid replication peers.", e); + continue; + } + } + return validPeers; + } + + /** + * Set the table's replication switch if the table's replication switch is already not set. 
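A hypothetical end-to-end use of the new replication switches; cluster configuration, peer setup, and the table name are assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;

public class TableReplicationToggle {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (ReplicationAdmin repAdmin = new ReplicationAdmin(conf)) {
      TableName table = TableName.valueOf("usertable");  // assumed table name
      // Creates the table on each valid peer if missing, then sets every column family
      // to REPLICATION_SCOPE_GLOBAL.
      repAdmin.enableTableRep(table);
      // Later: flip the families back to REPLICATION_SCOPE_LOCAL.
      repAdmin.disableTableRep(table);
    }
  }
}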
+ * @param tableName name of the table + * @param isRepEnabled is replication switch enable or disable + * @throws IOException if a remote or network exception occurs + */ + private void setTableRep(final TableName tableName, boolean isRepEnabled) throws IOException { + Admin admin = null; + try { + admin = this.connection.getAdmin(); + HTableDescriptor htd = admin.getTableDescriptor(tableName); + if (isTableRepEnabled(htd) ^ isRepEnabled) { + boolean isOnlineSchemaUpdateEnabled = + this.connection.getConfiguration() + .getBoolean("hbase.online.schema.update.enable", true); + if (!isOnlineSchemaUpdateEnabled) { + admin.disableTable(tableName); + } + for (HColumnDescriptor hcd : htd.getFamilies()) { + hcd.setScope(isRepEnabled ? HConstants.REPLICATION_SCOPE_GLOBAL + : HConstants.REPLICATION_SCOPE_LOCAL); + } + admin.modifyTable(tableName, htd); + if (!isOnlineSchemaUpdateEnabled) { + admin.enableTable(tableName); + } + } + } finally { + if (admin != null) { + try { + admin.close(); + } catch (IOException e) { + LOG.warn("Failed to close admin connection."); + LOG.debug("Details on failure to close admin connection.", e); + } + } + } + } + + /** + * @param htd table descriptor details for the table to check + * @return true if table's replication switch is enabled + */ + private boolean isTableRepEnabled(HTableDescriptor htd) { + for (HColumnDescriptor hcd : htd.getFamilies()) { + if (hcd.getScope() != HConstants.REPLICATION_SCOPE_GLOBAL) { + return false; + } + } + return true; + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java index 49134f1f092..43a4ee4a061 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/ConnectionClosingException.java @@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; * Thrown when the client believes that we are trying to communicate to has * been repeatedly unresponsive for a while. * -* On receiving such an exception. The HConnectionManager will skip all +* On receiving such an exception. The ConnectionManager will skip all * retries and fast fail the operation. */ @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java index 51c960df369..6ca1d886f92 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/PreemptiveFastFailException.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.ServerName; * Thrown when the client believes that we are trying to communicate to has * been repeatedly unresponsive for a while. * - * On receiving such an exception. The HConnectionManager will skip all + * On receiving such an exception. The ConnectionManager will skip all * retries and fast fail the operation. 
*/ @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/executor/EventType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/executor/EventType.java index 9764efdc70d..ac76edbdeae 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/executor/EventType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/executor/EventType.java @@ -37,22 +37,22 @@ public enum EventType { // Messages originating from RS (NOTE: there is NO direct communication from // RS to Master). These are a result of RS updates into ZK. // RS_ZK_REGION_CLOSING (1), // It is replaced by M_ZK_REGION_CLOSING(HBASE-4739) - + /** * RS_ZK_REGION_CLOSED
- * + * * RS has finished closing a region. */ RS_ZK_REGION_CLOSED (2, ExecutorType.MASTER_CLOSE_REGION), /** * RS_ZK_REGION_OPENING
- * + * * RS is in process of opening a region. */ RS_ZK_REGION_OPENING (3, null), /** * RS_ZK_REGION_OPENED
- * + * * RS has finished opening a region. */ RS_ZK_REGION_OPENED (4, ExecutorType.MASTER_OPEN_REGION), @@ -70,7 +70,7 @@ public enum EventType { RS_ZK_REGION_SPLIT (6, ExecutorType.MASTER_SERVER_OPERATIONS), /** * RS_ZK_REGION_FAILED_OPEN
- * + * * RS failed to open a region. */ RS_ZK_REGION_FAILED_OPEN (7, ExecutorType.MASTER_CLOSE_REGION), @@ -217,7 +217,7 @@ public enum EventType { * Master adds this region as closing in ZK */ M_ZK_REGION_CLOSING (51, null), - + /** * Master controlled events to be executed on the master * M_SERVER_SHUTDOWN @@ -232,14 +232,14 @@ public enum EventType { M_META_SERVER_SHUTDOWN (72, ExecutorType.MASTER_META_SERVER_OPERATIONS), /** * Master controlled events to be executed on the master.
- * + * * M_MASTER_RECOVERY
* Master is processing recovery of regions found in ZK RIT */ M_MASTER_RECOVERY (73, ExecutorType.MASTER_SERVER_OPERATIONS), /** * Master controlled events to be executed on the master.
- * + * * M_LOG_REPLAY
* Master is processing log replay of failed region server */ @@ -247,18 +247,25 @@ public enum EventType { /** * RS controlled events to be executed on the RS.
- * + * * RS_PARALLEL_SEEK */ RS_PARALLEL_SEEK (80, ExecutorType.RS_PARALLEL_SEEK), - + /** * RS wal recovery work items(either creating recover.edits or directly replay wals) * to be executed on the RS.
- * + * * RS_LOG_REPLAY */ - RS_LOG_REPLAY (81, ExecutorType.RS_LOG_REPLAY_OPS); + RS_LOG_REPLAY (81, ExecutorType.RS_LOG_REPLAY_OPS), + + /** + * RS flush triggering from secondary region replicas to primary region replica.
+ * + * RS_REGION_REPLICA_FLUSH + */ + RS_REGION_REPLICA_FLUSH (82, ExecutorType.RS_REGION_REPLICA_FLUSH_OPS); private final int code; private final ExecutorType executor; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java index 5590b0a782c..d0f6beedbb6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java @@ -45,7 +45,8 @@ public enum ExecutorType { RS_CLOSE_ROOT (24), RS_CLOSE_META (25), RS_PARALLEL_SEEK (26), - RS_LOG_REPLAY_OPS (27); + RS_LOG_REPLAY_OPS (27), + RS_REGION_REPLICA_FLUSH_OPS (28); ExecutorType(int value) {} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java index 9963af6451f..d8ea0949f68 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java @@ -133,7 +133,7 @@ public class ColumnRangeFilter extends FilterBase { } if (!this.minColumnInclusive && cmpMin == 0) { - return ReturnCode.SKIP; + return ReturnCode.NEXT_COL; } if (this.maxColumn == null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java index e2890268e05..e79a4d5476e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java @@ -65,7 +65,7 @@ public class FamilyFilter extends CompareFilter { if (familyLength > 0) { if (doCompare(this.compareOp, this.comparator, v.getFamilyArray(), v.getFamilyOffset(), familyLength)) { - return ReturnCode.SKIP; + return ReturnCode.NEXT_ROW; } } return ReturnCode.INCLUDE; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncCall.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncCall.java index c35238ce9f6..68a494d7689 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncCall.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncCall.java @@ -72,7 +72,7 @@ public class AsyncCall extends DefaultPromise { this.responseDefaultType = responseDefaultType; this.startTime = EnvironmentEdgeManager.currentTime(); - this.rpcTimeout = controller.getCallTimeout(); + this.rpcTimeout = controller.hasCallTimeout() ? controller.getCallTimeout() : 0; } /** @@ -84,9 +84,10 @@ public class AsyncCall extends DefaultPromise { return this.startTime; } - @Override public String toString() { - return "callId: " + this.id + " methodName: " + this.method.getName() + " param {" + - (this.param != null ? ProtobufUtil.getShortTextFormat(this.param) : "") + "}"; + @Override + public String toString() { + return "callId: " + this.id + " methodName: " + this.method.getName() + " param {" + + (this.param != null ? 
ProtobufUtil.getShortTextFormat(this.param) : "") + "}"; } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java index 054c9b5690b..32537fa4f82 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java @@ -17,9 +17,6 @@ */ package org.apache.hadoop.hbase.ipc; -import com.google.protobuf.Descriptors; -import com.google.protobuf.Message; -import com.google.protobuf.RpcCallback; import io.netty.bootstrap.Bootstrap; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufOutputStream; @@ -31,11 +28,29 @@ import io.netty.util.Timeout; import io.netty.util.TimerTask; import io.netty.util.concurrent.GenericFutureListener; import io.netty.util.concurrent.Promise; + +import java.io.IOException; +import java.net.ConnectException; +import java.net.InetSocketAddress; +import java.net.SocketException; +import java.nio.ByteBuffer; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.TimeUnit; + +import javax.security.sasl.SaslException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.ConnectionClosingException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; import org.apache.hadoop.hbase.protobuf.generated.RPCProtos; import org.apache.hadoop.hbase.protobuf.generated.TracingProtos; @@ -56,18 +71,9 @@ import org.apache.hadoop.security.token.TokenSelector; import org.apache.htrace.Span; import org.apache.htrace.Trace; -import javax.security.sasl.SaslException; -import java.io.IOException; -import java.net.ConnectException; -import java.net.InetSocketAddress; -import java.net.SocketException; -import java.nio.ByteBuffer; -import java.security.PrivilegedExceptionAction; -import java.util.HashMap; -import java.util.Map; -import java.util.Random; -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.TimeUnit; +import com.google.protobuf.Descriptors; +import com.google.protobuf.Message; +import com.google.protobuf.RpcCallback; /** * Netty RPC channel @@ -97,8 +103,6 @@ public class AsyncRpcChannel { final String serviceName; final InetSocketAddress address; - ConcurrentSkipListMap calls = new ConcurrentSkipListMap<>(); - private int ioFailureCounter = 0; private int connectFailureCounter = 0; @@ -108,15 +112,18 @@ public class AsyncRpcChannel { private Token token; private String serverPrincipal; - volatile boolean shouldCloseConnection = false; - private IOException closeException; + + // NOTE: closed and connected flags below are only changed when a lock on pendingCalls + private final Map pendingCalls = new HashMap(); + private boolean connected = false; + private boolean closed = false; private Timeout cleanupTimer; private final TimerTask timeoutTask = new TimerTask() { - @Override public void run(Timeout timeout) throws Exception { - cleanupTimer = null; - cleanupCalls(false); + @Override + public void run(Timeout timeout) throws Exception { + cleanupCalls(); } }; @@ -183,10 +190,11 @@ public class 
AsyncRpcChannel { if (ticket == null) { throw new FatalConnectionException("ticket/user is null"); } + final UserGroupInformation realTicket = ticket; saslHandler = ticket.doAs(new PrivilegedExceptionAction() { @Override public SaslClientHandler run() throws IOException { - return getSaslHandler(bootstrap); + return getSaslHandler(realTicket, bootstrap); } }); if (saslHandler != null) { @@ -213,15 +221,20 @@ public class AsyncRpcChannel { ch.pipeline() .addLast("frameDecoder", new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4)); ch.pipeline().addLast(new AsyncServerResponseHandler(this)); - try { writeChannelHeader(ch).addListener(new GenericFutureListener() { - @Override public void operationComplete(ChannelFuture future) throws Exception { + @Override + public void operationComplete(ChannelFuture future) throws Exception { if (!future.isSuccess()) { close(future.cause()); return; } - for (AsyncCall call : calls.values()) { + List callsToWrite; + synchronized (pendingCalls) { + connected = true; + callsToWrite = new ArrayList(pendingCalls.values()); + } + for (AsyncCall call : callsToWrite) { writeRequest(call); } } @@ -233,24 +246,26 @@ public class AsyncRpcChannel { /** * Get SASL handler - * * @param bootstrap to reconnect to * @return new SASL handler * @throws java.io.IOException if handler failed to create */ - private SaslClientHandler getSaslHandler(final Bootstrap bootstrap) throws IOException { - return new SaslClientHandler(authMethod, token, serverPrincipal, client.fallbackAllowed, - client.conf.get("hbase.rpc.protection", - SaslUtil.QualityOfProtection.AUTHENTICATION.name().toLowerCase()), + private SaslClientHandler getSaslHandler(final UserGroupInformation realTicket, + final Bootstrap bootstrap) throws IOException { + return new SaslClientHandler(realTicket, authMethod, token, serverPrincipal, + client.fallbackAllowed, client.conf.get("hbase.rpc.protection", + SaslUtil.QualityOfProtection.AUTHENTICATION.name().toLowerCase()), new SaslClientHandler.SaslExceptionHandler() { - @Override public void handle(int retryCount, Random random, Throwable cause) { + @Override + public void handle(int retryCount, Random random, Throwable cause) { try { // Handle Sasl failure. 
Try to potentially get new credentials - handleSaslConnectionFailure(retryCount, cause, ticket.getUGI()); + handleSaslConnectionFailure(retryCount, cause, realTicket); // Try to reconnect - AsyncRpcClient.WHEEL_TIMER.newTimeout(new TimerTask() { - @Override public void run(Timeout timeout) throws Exception { + client.newTimeout(new TimerTask() { + @Override + public void run(Timeout timeout) throws Exception { connect(bootstrap); } }, random.nextInt(reloginMaxBackoff) + 1, TimeUnit.MILLISECONDS); @@ -259,10 +274,11 @@ public class AsyncRpcChannel { } } }, new SaslClientHandler.SaslSuccessfulConnectHandler() { - @Override public void onSuccess(Channel channel) { - startHBaseConnection(channel); - } - }); + @Override + public void onSuccess(Channel channel) { + startHBaseConnection(channel); + } + }); } /** @@ -274,7 +290,7 @@ public class AsyncRpcChannel { */ private void retryOrClose(final Bootstrap bootstrap, int connectCounter, Throwable e) { if (connectCounter < client.maxRetries) { - AsyncRpcClient.WHEEL_TIMER.newTimeout(new TimerTask() { + client.newTimeout(new TimerTask() { @Override public void run(Timeout timeout) throws Exception { connect(bootstrap); } @@ -295,66 +311,50 @@ public class AsyncRpcChannel { public Promise callMethod(final Descriptors.MethodDescriptor method, final PayloadCarryingRpcController controller, final Message request, final Message responsePrototype) { - if (shouldCloseConnection) { - Promise promise = channel.eventLoop().newPromise(); - promise.setFailure(new ConnectException()); - return promise; - } - - final AsyncCall call = new AsyncCall(channel.eventLoop(), client.callIdCnt.getAndIncrement(), - method, request, controller, responsePrototype); - + final AsyncCall call = + new AsyncCall(channel.eventLoop(), client.callIdCnt.getAndIncrement(), method, request, + controller, responsePrototype); controller.notifyOnCancel(new RpcCallback() { @Override public void run(Object parameter) { - calls.remove(call.id); + // TODO: do not need to call AsyncCall.setFailed? + synchronized (pendingCalls) { + pendingCalls.remove(call.id); + } } }); + // TODO: this should be handled by PayloadCarryingRpcController. 
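// Note on the new bookkeeping: a cancelled call is simply dropped from pendingCalls here.
// If the server still sends a response for it, removePendingCall(id) in
// AsyncServerResponseHandler returns null and the late response is ignored rather than
// completing or failing the promise.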
if (controller.isCanceled()) { // To finish if the call was cancelled before we set the notification (race condition) call.cancel(true); return call; } - calls.put(call.id, call); - - // check again, see https://issues.apache.org/jira/browse/HBASE-12951 - if (shouldCloseConnection) { - Promise promise = channel.eventLoop().newPromise(); - promise.setFailure(new ConnectException()); - return promise; + synchronized (pendingCalls) { + if (closed) { + Promise promise = channel.eventLoop().newPromise(); + promise.setFailure(new ConnectException()); + return promise; + } + pendingCalls.put(call.id, call); + // Add timeout for cleanup if none is present + if (cleanupTimer == null && call.getRpcTimeout() > 0) { + cleanupTimer = + client.newTimeout(timeoutTask, call.getRpcTimeout(), + TimeUnit.MILLISECONDS); + } + if (!connected) { + return call; + } } - - // Add timeout for cleanup if none is present - if (cleanupTimer == null) { - cleanupTimer = AsyncRpcClient.WHEEL_TIMER.newTimeout(timeoutTask, call.getRpcTimeout(), - TimeUnit.MILLISECONDS); - } - - if(channel.isActive()) { - writeRequest(call); - } - + writeRequest(call); return call; } - /** - * Calls method and returns a promise - * @param method to call - * @param controller to run call with - * @param request to send - * @param responsePrototype for response message - * @return Promise to listen to result - * @throws java.net.ConnectException on connection failures - */ - public Promise callMethodWithPromise( - final Descriptors.MethodDescriptor method, final PayloadCarryingRpcController controller, - final Message request, final Message responsePrototype) throws ConnectException { - if (shouldCloseConnection || !channel.isOpen()) { - throw new ConnectException(); + AsyncCall removePendingCall(int id) { + synchronized (pendingCalls) { + return pendingCalls.remove(id); } - - return this.callMethod(method, controller, request, responsePrototype); } /** @@ -380,6 +380,7 @@ public class AsyncRpcChannel { headerBuilder.setCellBlockCompressorClass(client.compressor.getClass().getCanonicalName()); } + headerBuilder.setVersionInfo(ProtobufUtil.getVersionInfo()); RPCProtos.ConnectionHeader header = headerBuilder.build(); @@ -400,10 +401,6 @@ public class AsyncRpcChannel { */ private void writeRequest(final AsyncCall call) { try { - if (shouldCloseConnection) { - return; - } - final RPCProtos.RequestHeader.Builder requestHeaderBuilder = RPCProtos.RequestHeader .newBuilder(); requestHeaderBuilder.setCallId(call.id) @@ -439,25 +436,12 @@ public class AsyncRpcChannel { IPCUtil.write(out, rh, call.param, cellBlock); } - channel.writeAndFlush(b).addListener(new CallWriteListener(this,call)); + channel.writeAndFlush(b).addListener(new CallWriteListener(this, call.id)); } catch (IOException e) { - if (!shouldCloseConnection) { - close(e); - } + close(e); } } - /** - * Fail a call - * - * @param call to fail - * @param cause of fail - */ - void failCall(AsyncCall call, IOException cause) { - calls.remove(call.id); - call.setFailed(cause); - } - /** * Set up server authorization * @@ -550,18 +534,22 @@ public class AsyncRpcChannel { * @param e exception on close */ public void close(final Throwable e) { - client.removeConnection(ConnectionId.hashCode(ticket,serviceName,address)); + client.removeConnection(this); // Move closing from the requesting thread to the channel thread channel.eventLoop().execute(new Runnable() { @Override public void run() { - if (shouldCloseConnection) { - return; + List toCleanup; + synchronized (pendingCalls) { + if (closed) { + 
return; + } + closed = true; + toCleanup = new ArrayList(pendingCalls.values()); + pendingCalls.clear(); } - - shouldCloseConnection = true; - + IOException closeException = null; if (e != null) { if (e instanceof IOException) { closeException = (IOException) e; @@ -569,16 +557,19 @@ public class AsyncRpcChannel { closeException = new IOException(e); } } - // log the info if (LOG.isDebugEnabled() && closeException != null) { - LOG.debug(name + ": closing ipc connection to " + address + ": " + - closeException.getMessage()); + LOG.debug(name + ": closing ipc connection to " + address, closeException); + } + if (cleanupTimer != null) { + cleanupTimer.cancel(); + cleanupTimer = null; + } + for (AsyncCall call : toCleanup) { + call.setFailed(closeException != null ? closeException : new ConnectionClosingException( + "Call id=" + call.id + " on server " + address + " aborted: connection is closing")); } - - cleanupCalls(true); channel.disconnect().addListener(ChannelFutureListener.CLOSE); - if (LOG.isDebugEnabled()) { LOG.debug(name + ": closed"); } @@ -591,64 +582,37 @@ public class AsyncRpcChannel { * * @param cleanAll true if all calls should be cleaned, false for only the timed out calls */ - public void cleanupCalls(boolean cleanAll) { - // Cancel outstanding timers - if (cleanupTimer != null) { - cleanupTimer.cancel(); - cleanupTimer = null; - } - - if (cleanAll) { - for (AsyncCall call : calls.values()) { - synchronized (call) { - // Calls can be done on another thread so check before failing them - if(!call.isDone()) { - if (closeException == null) { - failCall(call, new ConnectionClosingException("Call id=" + call.id + - " on server " + address + " aborted: connection is closing")); - } else { - failCall(call, closeException); - } - } - } - } - } else { - for (AsyncCall call : calls.values()) { - long waitTime = EnvironmentEdgeManager.currentTime() - call.getStartTime(); + private void cleanupCalls() { + List toCleanup = new ArrayList(); + long currentTime = EnvironmentEdgeManager.currentTime(); + long nextCleanupTaskDelay = -1L; + synchronized (pendingCalls) { + for (Iterator iter = pendingCalls.values().iterator(); iter.hasNext();) { + AsyncCall call = iter.next(); long timeout = call.getRpcTimeout(); - if (timeout > 0 && waitTime >= timeout) { - synchronized (call) { - // Calls can be done on another thread so check before failing them - if (!call.isDone()) { - closeException = new CallTimeoutException("Call id=" + call.id + - ", waitTime=" + waitTime + ", rpcTimeout=" + timeout); - failCall(call, closeException); + if (timeout > 0) { + if (currentTime - call.getStartTime() >= timeout) { + iter.remove(); + toCleanup.add(call); + } else { + if (nextCleanupTaskDelay < 0 || timeout < nextCleanupTaskDelay) { + nextCleanupTaskDelay = timeout; } } - } else { - // We expect the call to be ordered by timeout. It may not be the case, but stopping - // at the first valid call allows to be sure that we still have something to do without - // spending too much time by reading the full list. 
- break; } } - - if (!calls.isEmpty()) { - AsyncCall firstCall = calls.firstEntry().getValue(); - - final long newTimeout; - long maxWaitTime = EnvironmentEdgeManager.currentTime() - firstCall.getStartTime(); - if (maxWaitTime < firstCall.getRpcTimeout()) { - newTimeout = firstCall.getRpcTimeout() - maxWaitTime; - } else { - newTimeout = 0; - } - - closeException = null; - cleanupTimer = AsyncRpcClient.WHEEL_TIMER.newTimeout(timeoutTask, - newTimeout, TimeUnit.MILLISECONDS); + if (nextCleanupTaskDelay > 0) { + cleanupTimer = + client.newTimeout(timeoutTask, nextCleanupTaskDelay, + TimeUnit.MILLISECONDS); + } else { + cleanupTimer = null; } } + for (AsyncCall call : toCleanup) { + call.setFailed(new CallTimeoutException("Call id=" + call.id + ", waitTime=" + + (currentTime - call.getStartTime()) + ", rpcTimeout=" + call.getRpcTimeout())); + } } /** @@ -745,6 +709,10 @@ public class AsyncRpcChannel { }); } + public int getConnectionHashCode() { + return ConnectionId.hashCode(ticket, serviceName, address); + } + @Override public String toString() { return this.address.toString() + "/" + this.serviceName + "/" + this.ticket; @@ -755,20 +723,22 @@ public class AsyncRpcChannel { */ private static final class CallWriteListener implements ChannelFutureListener { private final AsyncRpcChannel rpcChannel; - private final AsyncCall call; + private final int id; - public CallWriteListener(AsyncRpcChannel asyncRpcChannel, AsyncCall call) { + public CallWriteListener(AsyncRpcChannel asyncRpcChannel, int id) { this.rpcChannel = asyncRpcChannel; - this.call = call; + this.id = id; } - @Override public void operationComplete(ChannelFuture future) throws Exception { + @Override + public void operationComplete(ChannelFuture future) throws Exception { if (!future.isSuccess()) { - if(!this.call.isDone()) { + AsyncCall call = rpcChannel.removePendingCall(id); + if (call != null) { if (future.cause() instanceof IOException) { - rpcChannel.failCall(call, (IOException) future.cause()); + call.setFailed((IOException) future.cause()); } else { - rpcChannel.failCall(call, new IOException(future.cause())); + call.setFailed(new IOException(future.cause())); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java index 30b622ad906..e55a7ebfce0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java @@ -17,14 +17,7 @@ */ package org.apache.hadoop.hbase.ipc; -import com.google.common.annotations.VisibleForTesting; -import com.google.protobuf.Descriptors; -import com.google.protobuf.Message; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcChannel; -import com.google.protobuf.RpcController; import io.netty.bootstrap.Bootstrap; -import io.netty.buffer.PooledByteBufAllocator; import io.netty.channel.Channel; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; @@ -35,11 +28,24 @@ import io.netty.channel.nio.NioEventLoopGroup; import io.netty.channel.socket.SocketChannel; import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.util.HashedWheelTimer; +import io.netty.util.Timeout; +import io.netty.util.TimerTask; import io.netty.util.concurrent.Future; import io.netty.util.concurrent.GenericFutureListener; import io.netty.util.concurrent.Promise; + +import java.io.IOException; +import java.net.InetSocketAddress; +import 
java.net.SocketAddress; +import java.nio.ByteBuffer; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -49,25 +55,26 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PoolMap; import org.apache.hadoop.hbase.util.Threads; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import java.nio.ByteBuffer; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; +import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.Descriptors; +import com.google.protobuf.Message; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcChannel; +import com.google.protobuf.RpcController; /** * Netty client for the requests and responses */ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class AsyncRpcClient extends AbstractRpcClient { public static final String CLIENT_MAX_THREADS = "hbase.rpc.client.threads.max"; - public static final String USE_NATIVE_TRANSPORT = "hbase.rpc.client.useNativeTransport"; + public static final String USE_NATIVE_TRANSPORT = "hbase.rpc.client.nativetransport"; + public static final String USE_GLOBAL_EVENT_LOOP_GROUP = "hbase.rpc.client.globaleventloopgroup"; - public static final HashedWheelTimer WHEEL_TIMER = - new HashedWheelTimer(100, TimeUnit.MILLISECONDS); + private static final HashedWheelTimer WHEEL_TIMER = + new HashedWheelTimer(Threads.newDaemonThreadFactory("AsyncRpcChannel-timer"), + 100, TimeUnit.MILLISECONDS); private static final ChannelInitializer DEFAULT_CHANNEL_INITIALIZER = new ChannelInitializer() { @@ -79,12 +86,54 @@ public class AsyncRpcClient extends AbstractRpcClient { protected final AtomicInteger callIdCnt = new AtomicInteger(); - private final EventLoopGroup eventLoopGroup; private final PoolMap connections; final FailedServers failedServers; - private final Bootstrap bootstrap; + @VisibleForTesting + final Bootstrap bootstrap; + + private final boolean useGlobalEventLoopGroup; + + @VisibleForTesting + static Pair> GLOBAL_EVENT_LOOP_GROUP; + + private synchronized static Pair> + getGlobalEventLoopGroup(Configuration conf) { + if (GLOBAL_EVENT_LOOP_GROUP == null) { + GLOBAL_EVENT_LOOP_GROUP = createEventLoopGroup(conf); + if (LOG.isDebugEnabled()) { + LOG.debug("Create global event loop group " + + GLOBAL_EVENT_LOOP_GROUP.getFirst().getClass().getSimpleName()); + } + } + return GLOBAL_EVENT_LOOP_GROUP; + } + + private static Pair> createEventLoopGroup( + Configuration conf) { + // Max amount of threads to use. 0 lets Netty decide based on amount of cores + int maxThreads = conf.getInt(CLIENT_MAX_THREADS, 0); + + // Config to enable native transport. Does not seem to be stable at time of implementation + // although it is not extensively tested. 
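// The native-transport switch is renamed to "hbase.rpc.client.nativetransport" (it was
// "hbase.rpc.client.useNativeTransport") and still defaults to false. An illustrative
// client-side opt-in (assumed usage, not part of this patch) could look like:
//   Configuration conf = HBaseConfiguration.create();
//   conf.setBoolean(AsyncRpcClient.USE_NATIVE_TRANSPORT, true);          // epoll, Linux only
//   conf.setInt(AsyncRpcClient.CLIENT_MAX_THREADS, 16);                  // 0 = let Netty decide
//   conf.setBoolean(AsyncRpcClient.USE_GLOBAL_EVENT_LOOP_GROUP, false);  // per-client group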
+ boolean epollEnabled = conf.getBoolean(USE_NATIVE_TRANSPORT, false); + + // Use the faster native epoll transport mechanism on linux if enabled + if (epollEnabled && JVM.isLinux()) { + if (LOG.isDebugEnabled()) { + LOG.debug("Create EpollEventLoopGroup with maxThreads = " + maxThreads); + } + return new Pair>(new EpollEventLoopGroup(maxThreads, + Threads.newDaemonThreadFactory("AsyncRpcChannel")), EpollSocketChannel.class); + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Create NioEventLoopGroup with maxThreads = " + maxThreads); + } + return new Pair>(new NioEventLoopGroup(maxThreads, + Threads.newDaemonThreadFactory("AsyncRpcChannel")), NioSocketChannel.class); + } + } /** * Constructor for tests @@ -103,23 +152,16 @@ public class AsyncRpcClient extends AbstractRpcClient { LOG.debug("Starting async Hbase RPC client"); } - // Max amount of threads to use. 0 lets Netty decide based on amount of cores - int maxThreads = conf.getInt(CLIENT_MAX_THREADS, 0); - - // Config to enable native transport. Does not seem to be stable at time of implementation - // although it is not extensively tested. - boolean epollEnabled = conf.getBoolean(USE_NATIVE_TRANSPORT, false); - - // Use the faster native epoll transport mechanism on linux if enabled - Class socketChannelClass; - if (epollEnabled && JVM.isLinux()) { - socketChannelClass = EpollSocketChannel.class; - this.eventLoopGroup = - new EpollEventLoopGroup(maxThreads, Threads.newDaemonThreadFactory("AsyncRpcChannel")); + Pair> eventLoopGroupAndChannelClass; + this.useGlobalEventLoopGroup = conf.getBoolean(USE_GLOBAL_EVENT_LOOP_GROUP, true); + if (useGlobalEventLoopGroup) { + eventLoopGroupAndChannelClass = getGlobalEventLoopGroup(configuration); } else { - socketChannelClass = NioSocketChannel.class; - this.eventLoopGroup = - new NioEventLoopGroup(maxThreads, Threads.newDaemonThreadFactory("AsyncRpcChannel")); + eventLoopGroupAndChannelClass = createEventLoopGroup(configuration); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Use " + (useGlobalEventLoopGroup ? "global" : "individual") + " event loop group " + + eventLoopGroupAndChannelClass.getFirst().getClass().getSimpleName()); } this.connections = new PoolMap<>(getPoolType(configuration), getPoolSize(configuration)); @@ -130,8 +172,8 @@ public class AsyncRpcClient extends AbstractRpcClient { // Configure the default bootstrap. 
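// Note that the explicit ChannelOption.ALLOCATOR / PooledByteBufAllocator.DEFAULT setting
// from the old bootstrap is not carried over below, so the channel falls back to Netty's
// default ByteBufAllocator from here on.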
this.bootstrap = new Bootstrap(); - bootstrap.group(eventLoopGroup).channel(socketChannelClass) - .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) + bootstrap.group(eventLoopGroupAndChannelClass.getFirst()) + .channel(eventLoopGroupAndChannelClass.getSecond()) .option(ChannelOption.TCP_NODELAY, tcpNoDelay) .option(ChannelOption.SO_KEEPALIVE, tcpKeepAlive) .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, operationTimeout); @@ -169,16 +211,19 @@ public class AsyncRpcClient extends AbstractRpcClient { * @throws InterruptedException if call is interrupted * @throws java.io.IOException if a connection failure is encountered */ - @Override protected Pair call(PayloadCarryingRpcController pcrc, + @Override + protected Pair call(PayloadCarryingRpcController pcrc, Descriptors.MethodDescriptor md, Message param, Message returnType, User ticket, InetSocketAddress addr) throws IOException, InterruptedException { - + if (pcrc == null) { + pcrc = new PayloadCarryingRpcController(); + } final AsyncRpcChannel connection = createRpcChannel(md.getService().getName(), addr, ticket); - Promise promise = connection.callMethodWithPromise(md, pcrc, param, returnType); - + Promise promise = connection.callMethod(md, pcrc, param, returnType); + long timeout = pcrc.hasCallTimeout() ? pcrc.getCallTimeout() : 0; try { - Message response = promise.get(); + Message response = timeout > 0 ? promise.get(timeout, TimeUnit.MILLISECONDS) : promise.get(); return new Pair<>(response, pcrc.cellScanner()); } catch (ExecutionException e) { if (e.getCause() instanceof IOException) { @@ -186,6 +231,8 @@ public class AsyncRpcClient extends AbstractRpcClient { } else { throw new IOException(e.getCause()); } + } catch (TimeoutException e) { + throw new CallTimeoutException(promise.toString()); } } @@ -231,6 +278,8 @@ public class AsyncRpcClient extends AbstractRpcClient { } } + private boolean closed = false; + /** * Close netty */ @@ -240,12 +289,18 @@ public class AsyncRpcClient extends AbstractRpcClient { } synchronized (connections) { + if (closed) { + return; + } + closed = true; for (AsyncRpcChannel conn : connections.values()) { conn.close(null); } } - - eventLoopGroup.shutdownGracefully(); + // do not close global EventLoopGroup. 
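// With hbase.rpc.client.globaleventloopgroup left at its default of true, the event loop
// group is shared by every AsyncRpcClient in the JVM, so closing one client must not shut
// it down; only a group created per client via createEventLoopGroup(conf) is released here.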
+ if (!useGlobalEventLoopGroup) { + bootstrap.group().shutdownGracefully(); + } } /** @@ -282,10 +337,6 @@ public class AsyncRpcClient extends AbstractRpcClient { */ private AsyncRpcChannel createRpcChannel(String serviceName, InetSocketAddress location, User ticket) throws StoppedRpcClientException, FailedServerException { - if (this.eventLoopGroup.isShuttingDown() || this.eventLoopGroup.isShutdown()) { - throw new StoppedRpcClientException(); - } - // Check if server is failed if (this.failedServers.isFailedServer(location)) { if (LOG.isDebugEnabled()) { @@ -300,6 +351,9 @@ public class AsyncRpcClient extends AbstractRpcClient { AsyncRpcChannel rpcChannel; synchronized (connections) { + if (closed) { + throw new StoppedRpcClientException(); + } rpcChannel = connections.get(hashCode); if (rpcChannel == null) { rpcChannel = new AsyncRpcChannel(this.bootstrap, this, ticket, serviceName, location); @@ -337,12 +391,20 @@ public class AsyncRpcClient extends AbstractRpcClient { /** * Remove connection from pool - * - * @param connectionHashCode of connection */ - public void removeConnection(int connectionHashCode) { + public void removeConnection(AsyncRpcChannel connection) { + int connectionHashCode = connection.getConnectionHashCode(); synchronized (connections) { - this.connections.remove(connectionHashCode); + // we use address as cache key, so we should check here to prevent removing the + // wrong connection + AsyncRpcChannel connectionInPool = this.connections.get(connectionHashCode); + if (connectionInPool == connection) { + this.connections.remove(connectionHashCode); + } else if (LOG.isDebugEnabled()) { + LOG.debug(String.format("%s already removed, expected instance %08x, actual %08x", + connection.toString(), System.identityHashCode(connection), + System.identityHashCode(connectionInPool))); + } } } @@ -399,4 +461,8 @@ public class AsyncRpcClient extends AbstractRpcClient { this.rpcClient.callMethod(md, pcrc, param, returnType, this.ticket, this.isa, done); } } -} \ No newline at end of file + + Timeout newTimeout(TimerTask task, long delay, TimeUnit unit) { + return WHEEL_TIMER.newTimeout(task, delay, unit); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncServerResponseHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncServerResponseHandler.java index 43028c60ddc..58825746c5e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncServerResponseHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncServerResponseHandler.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hbase.ipc; -import com.google.protobuf.Message; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufInputStream; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInboundHandlerAdapter; + +import java.io.IOException; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.CellScanner; @@ -30,7 +32,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.RPCProtos; import org.apache.hadoop.ipc.RemoteException; -import java.io.IOException; +import com.google.protobuf.Message; /** * Handles Hbase responses @@ -53,16 +55,12 @@ public class AsyncServerResponseHandler extends ChannelInboundHandlerAdapter { @Override public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { ByteBuf inBuffer = (ByteBuf) msg; ByteBufInputStream in = new 
ByteBufInputStream(inBuffer); - - if (channel.shouldCloseConnection) { - return; - } int totalSize = inBuffer.readableBytes(); try { // Read the header RPCProtos.ResponseHeader responseHeader = RPCProtos.ResponseHeader.parseDelimitedFrom(in); int id = responseHeader.getCallId(); - AsyncCall call = channel.calls.get(id); + AsyncCall call = channel.removePendingCall(id); if (call == null) { // So we got a response for which we have no corresponding 'call' here on the client-side. // We probably timed out waiting, cleaned up all references, and now the server decides @@ -86,7 +84,7 @@ public class AsyncServerResponseHandler extends ChannelInboundHandlerAdapter { equals(FatalConnectionException.class.getName())) { channel.close(re); } else { - channel.failCall(call, re); + call.setFailed(re); } } else { Message value = null; @@ -105,13 +103,11 @@ public class AsyncServerResponseHandler extends ChannelInboundHandlerAdapter { } call.setSuccess(value, cellBlockScanner); } - channel.calls.remove(id); } catch (IOException e) { // Treat this as a fatal condition and close this connection channel.close(e); } finally { inBuffer.release(); - channel.cleanupCalls(false); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java index b7e7728fece..7c6c9ba2fdb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java @@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.codec.Codec; +import org.apache.hadoop.hbase.io.BoundedByteBufferPool; import org.apache.hadoop.hbase.io.ByteBufferOutputStream; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.util.Bytes; @@ -65,6 +66,7 @@ public class IPCUtil { this.conf = conf; this.cellBlockDecompressionMultiplier = conf.getInt("hbase.ipc.cellblock.decompression.buffersize.multiplier", 3); + // Guess that 16k is a good size for rpc buffer. Could go bigger. See the TODO below in // #buildCellBlock. this.cellBlockBuildingInitialBufferSize = @@ -90,24 +92,49 @@ public class IPCUtil { @SuppressWarnings("resource") public ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor, final CellScanner cellScanner) + throws IOException { + return buildCellBlock(codec, compressor, cellScanner, null); + } + + /** + * Puts CellScanner Cells into a cell block using passed in codec and/or + * compressor. + * @param codec + * @param compressor + * @param cellScanner + * @param pool Pool of ByteBuffers to make use of. Can be null and then we'll allocate + * our own ByteBuffer. + * @return Null or byte buffer filled with a cellblock filled with passed-in Cells encoded using + * passed in codec and/or compressor; the returned buffer has been + * flipped and is ready for reading. Use limit to find total size. If pool was not + * null, then this returned ByteBuffer came from there and should be returned to the pool when + * done. 
+ * @throws IOException + */ + @SuppressWarnings("resource") + public ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor, + final CellScanner cellScanner, final BoundedByteBufferPool pool) throws IOException { if (cellScanner == null) return null; if (codec == null) throw new CellScannerButNoCodecException(); int bufferSize = this.cellBlockBuildingInitialBufferSize; - if (cellScanner instanceof HeapSize) { - long longSize = ((HeapSize)cellScanner).heapSize(); - // Just make sure we don't have a size bigger than an int. - if (longSize > Integer.MAX_VALUE) { - throw new IOException("Size " + longSize + " > " + Integer.MAX_VALUE); + ByteBufferOutputStream baos = null; + if (pool != null) { + ByteBuffer bb = pool.getBuffer(); + bufferSize = bb.capacity(); + baos = new ByteBufferOutputStream(bb); + } else { + // Then we need to make our own to return. + if (cellScanner instanceof HeapSize) { + long longSize = ((HeapSize)cellScanner).heapSize(); + // Just make sure we don't have a size bigger than an int. + if (longSize > Integer.MAX_VALUE) { + throw new IOException("Size " + longSize + " > " + Integer.MAX_VALUE); + } + bufferSize = ClassSize.align((int)longSize); } - bufferSize = ClassSize.align((int)longSize); - } // TODO: Else, get estimate on size of buffer rather than have the buffer resize. - // See TestIPCUtil main for experiment where we spin through the Cells getting estimate of - // total size before creating the buffer. It costs somw small percentage. If we are usually - // within the estimated buffer size, then the cost is not worth it. If we are often well - // outside the guesstimated buffer size, the processing can be done in half the time if we - // go w/ the estimated size rather than let the buffer resize. - ByteBufferOutputStream baos = new ByteBufferOutputStream(bufferSize); + baos = new ByteBufferOutputStream(bufferSize); + } OutputStream os = baos; Compressor poolCompressor = null; try { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java index dad51640cf3..9f5a9dc3447 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java @@ -263,7 +263,10 @@ public class RpcClientImpl extends AbstractRpcClient { try { Connection.this.tracedWriteRequest(cts.call, cts.priority, cts.span); } catch (IOException e) { - LOG.warn("call write error for call #" + cts.call.id + ", message =" + e.getMessage()); + if (LOG.isDebugEnabled()) { + LOG.debug("call write error for call #" + cts.call.id + + ", message =" + e.getMessage()); + } cts.call.setException(e); markClosed(e); } @@ -355,6 +358,7 @@ public class RpcClientImpl extends AbstractRpcClient { if (this.compressor != null) { builder.setCellBlockCompressorClass(this.compressor.getClass().getCanonicalName()); } + builder.setVersionInfo(ProtobufUtil.getVersionInfo()); this.header = builder.build(); this.setName("IPC Client (" + socketFactory.hashCode() +") connection to " + @@ -1133,6 +1137,7 @@ public class RpcClientImpl extends AbstractRpcClient { * @throws InterruptedException * @throws IOException */ + @Override protected Pair call(PayloadCarryingRpcController pcrc, MethodDescriptor md, Message param, Message returnType, User ticket, InetSocketAddress addr) throws IOException, InterruptedException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index 7bb9de1335c..3b714356da8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -40,7 +40,6 @@ import java.util.NavigableSet; import java.util.concurrent.TimeUnit; import com.google.protobuf.*; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -56,6 +55,7 @@ import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Consistency; import org.apache.hadoop.hbase.client.Delete; @@ -71,7 +71,6 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.TimeRange; -import org.apache.hadoop.hbase.protobuf.ProtobufMagic; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; @@ -89,6 +88,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsReques import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; import org.apache.hadoop.hbase.protobuf.generated.CellProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; @@ -107,6 +107,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Col import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.DeleteType; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad; import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; @@ -120,6 +121,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableReques import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; import org.apache.hadoop.hbase.protobuf.generated.WALProtos; @@ -133,6 +135,8 @@ import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor; import 
org.apache.hadoop.hbase.quotas.QuotaScope; import org.apache.hadoop.hbase.quotas.QuotaType; import org.apache.hadoop.hbase.quotas.ThrottleType; +import org.apache.hadoop.hbase.replication.ReplicationLoadSink; +import org.apache.hadoop.hbase.replication.ReplicationLoadSource; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.access.TablePermission; import org.apache.hadoop.hbase.security.access.UserPermission; @@ -145,6 +149,7 @@ import org.apache.hadoop.hbase.util.DynamicClassLoader; import org.apache.hadoop.hbase.util.ExceptionUtil; import org.apache.hadoop.hbase.util.Methods; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.VersionInfo; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.token.Token; @@ -1277,6 +1282,7 @@ public final class ProtobufUtil { } builder.setStale(result.isStale()); + builder.setPartial(result.isPartial()); return builder.build(); } @@ -1335,7 +1341,7 @@ public final class ProtobufUtil { for (CellProtos.Cell c : values) { cells.add(toCell(c)); } - return Result.create(cells, null, proto.getStale()); + return Result.create(cells, null, proto.getStale(), proto.getPartial()); } /** @@ -1718,6 +1724,26 @@ public final class ProtobufUtil { } } + /** + * A helper to warmup a region given a region name + * using admin protocol + * + * @param admin + * @param regionInfo + * + */ + public static void warmupRegion(final AdminService.BlockingInterface admin, + final HRegionInfo regionInfo) throws IOException { + + try { + WarmupRegionRequest warmupRegionRequest = + RequestConverter.buildWarmupRegionRequest(regionInfo); + + admin.warmupRegion(null, warmupRegionRequest); + } catch (ServiceException e) { + throw getRemoteException(e); + } + } /** * A helper to open a region using admin protocol. @@ -1736,6 +1762,7 @@ public final class ProtobufUtil { } } + /** * A helper to get the all the online regions on a region * server using admin protocol. 
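// Illustrative sketch (assumed usage, not part of this patch): a caller that wants to
// pre-open a region on the server it is about to move it to can use the new warmup helper
// above. The AdminService stub is assumed to already point at that destination server.

import java.io.IOException;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;

public final class WarmupRegionExample {
  private WarmupRegionExample() {}

  /** Ask the destination region server to warm up {@code regionInfo} before moving it. */
  public static void warmupBeforeMove(AdminService.BlockingInterface admin,
      HRegionInfo regionInfo) throws IOException {
    // The helper builds a WarmupRegionRequest via RequestConverter.buildWarmupRegionRequest
    // and translates any ServiceException into an IOException.
    ProtobufUtil.warmupRegion(admin, regionInfo);
  }
}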
@@ -2576,6 +2603,7 @@ public final class ProtobufUtil { FlushDescriptor.Builder desc = FlushDescriptor.newBuilder() .setAction(action) .setEncodedRegionName(ByteStringer.wrap(hri.getEncodedNameAsBytes())) + .setRegionName(ByteStringer.wrap(hri.getRegionName())) .setFlushSequenceNumber(flushSeqId) .setTableName(ByteStringer.wrap(hri.getTable().getName())); @@ -2601,6 +2629,7 @@ public final class ProtobufUtil { .setEventType(eventType) .setTableName(ByteStringer.wrap(hri.getTable().getName())) .setEncodedRegionName(ByteStringer.wrap(hri.getEncodedNameAsBytes())) + .setRegionName(ByteStringer.wrap(hri.getRegionName())) .setLogSequenceNumber(seqId) .setServer(toServerName(server)); @@ -2991,7 +3020,6 @@ public final class ProtobufUtil { } - /** * This version of protobuf's mergeDelimitedFrom avoid the hard-coded 64MB limit for decoding * buffers @@ -3071,4 +3099,40 @@ public final class ProtobufUtil { return result; } } + + public static ReplicationLoadSink toReplicationLoadSink( + ClusterStatusProtos.ReplicationLoadSink cls) { + return new ReplicationLoadSink(cls.getAgeOfLastAppliedOp(), cls.getTimeStampsOfLastAppliedOp()); + } + + public static ReplicationLoadSource toReplicationLoadSource( + ClusterStatusProtos.ReplicationLoadSource cls) { + return new ReplicationLoadSource(cls.getPeerID(), cls.getAgeOfLastShippedOp(), + cls.getSizeOfLogQueue(), cls.getTimeStampOfLastShippedOp(), cls.getReplicationLag()); + } + + public static List toReplicationLoadSourceList( + List clsList) { + ArrayList rlsList = new ArrayList(); + for (ClusterStatusProtos.ReplicationLoadSource cls : clsList) { + rlsList.add(toReplicationLoadSource(cls)); + } + return rlsList; + } + + /** + * Get a protocol buffer VersionInfo + * + * @return the converted protocol buffer VersionInfo + */ + public static RPCProtos.VersionInfo getVersionInfo() { + RPCProtos.VersionInfo.Builder builder = RPCProtos.VersionInfo.newBuilder(); + builder.setVersion(VersionInfo.getVersion()); + builder.setUrl(VersionInfo.getUrl()); + builder.setRevision(VersionInfo.getRevision()); + builder.setUser(VersionInfo.getUser()); + builder.setDate(VersionInfo.getDate()); + builder.setSrcChecksum(VersionInfo.getSrcChecksum()); + return builder.build(); + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java index d23aa023464..16c3dbfbc7b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionRequest; @@ -91,6 +92,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterSta import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest; import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest; @@ -486,6 +488,7 @@ public final class RequestConverter { builder.setCloseScanner(closeScanner); builder.setRegion(region); builder.setScan(ProtobufUtil.toScan(scan)); + builder.setClientHandlesPartials(true); return builder.build(); } @@ -503,6 +506,7 @@ public final class RequestConverter { builder.setNumberOfRows(numberOfRows); builder.setCloseScanner(closeScanner); builder.setScannerId(scannerId); + builder.setClientHandlesPartials(true); return builder.build(); } @@ -522,6 +526,7 @@ public final class RequestConverter { builder.setCloseScanner(closeScanner); builder.setScannerId(scannerId); builder.setNextCallSeq(nextCallSeq); + builder.setClientHandlesPartials(true); return builder.build(); } @@ -739,10 +744,22 @@ public final class RequestConverter { */ public static FlushRegionRequest buildFlushRegionRequest(final byte[] regionName) { + return buildFlushRegionRequest(regionName, false); + } + + /** + * Create a protocol buffer FlushRegionRequest for a given region name + * + * @param regionName the name of the region to get info + * @return a protocol buffer FlushRegionRequest + */ + public static FlushRegionRequest + buildFlushRegionRequest(final byte[] regionName, boolean writeFlushWALMarker) { FlushRegionRequest.Builder builder = FlushRegionRequest.newBuilder(); RegionSpecifier region = buildRegionSpecifier( RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); + builder.setWriteFlushWalMarker(writeFlushWALMarker); return builder.build(); } @@ -834,6 +851,16 @@ public final class RequestConverter { return builder.build(); } + /** + * Create a WarmupRegionRequest for a given region name + * + * @param regionInfo Region we are warming up + */ + public static WarmupRegionRequest buildWarmupRegionRequest(final HRegionInfo regionInfo) { + WarmupRegionRequest.Builder builder = WarmupRegionRequest.newBuilder(); + builder.setRegionInfo(HRegionInfo.convert(regionInfo)); + return builder.build(); + } /** * Create a CloseRegionRequest for a given encoded region name * @@ -1313,6 +1340,15 @@ public final class RequestConverter { return SetBalancerRunningRequest.newBuilder().setOn(on).setSynchronous(synchronous).build(); } + /** + * Creates a protocol buffer IsBalancerEnabledRequest + * + * @return a IsBalancerEnabledRequest + */ + public static IsBalancerEnabledRequest buildIsBalancerEnabledRequest() { + return IsBalancerEnabledRequest.newBuilder().build(); + } + /** * @see {@link #buildGetClusterStatusRequest} */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java index 1d42a828275..65eaddec8f8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult; import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse; @@ -300,8 +301,10 @@ public final class ResponseConverter { * @return A GetLastFlushedSequenceIdResponse */ public static GetLastFlushedSequenceIdResponse buildGetLastFlushedSequenceIdResponse( - long seqId) { - return GetLastFlushedSequenceIdResponse.newBuilder().setLastFlushedSequenceId(seqId).build(); + RegionStoreSequenceIds ids) { + return GetLastFlushedSequenceIdResponse.newBuilder() + .setLastFlushedSequenceId(ids.getLastFlushedSequenceId()) + .addAllStoreLastFlushedSequenceId(ids.getStoreSequenceIdList()).build(); } /** @@ -339,6 +342,9 @@ public final class ResponseConverter { // Cells are out in cellblocks. Group them up again as Results. How many to read at a // time will be found in getCellsLength -- length here is how many Cells in the i'th Result int noOfCells = response.getCellsPerResult(i); + boolean isPartial = + response.getPartialFlagPerResultCount() > i ? + response.getPartialFlagPerResult(i) : false; List cells = new ArrayList(noOfCells); for (int j = 0; j < noOfCells; j++) { try { @@ -361,7 +367,7 @@ public final class ResponseConverter { } cells.add(cellScanner.current()); } - results[i] = Result.create(cells, null, response.getStale()); + results[i] = Result.create(cells, null, response.getStale(), isPartial); } else { // Result is pure pb. results[i] = ProtobufUtil.toResult(response.getResults(i)); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/package.html b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/package.html index 292cac55ef2..d324653e61e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/package.html +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/package.html @@ -20,7 +20,7 @@ -Holds classes generated from protobuf +Holds classes generated from protobuf src/main/protobuf definition files.
See under src/main/protobuf for instruction on how to generate the content under diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java index dad1eddd127..293e9c6d0c8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java @@ -21,8 +21,6 @@ package org.apache.hadoop.hbase.quotas; import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -38,12 +36,11 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; public class ThrottlingException extends QuotaExceededException { private static final long serialVersionUID = 1406576492085155743L; - private static final Log LOG = LogFactory.getLog(ThrottlingException.class); - @InterfaceAudience.Public @InterfaceStability.Evolving public enum Type { NumRequestsExceeded, + RequestSizeExceeded, NumReadRequestsExceeded, NumWriteRequestsExceeded, WriteSizeExceeded, @@ -52,6 +49,7 @@ public class ThrottlingException extends QuotaExceededException { private static final String[] MSG_TYPE = new String[] { "number of requests exceeded", + "request size limit exceeded", "number of read requests exceeded", "number of write requests exceeded", "write size limit exceeded", @@ -98,6 +96,11 @@ public class ThrottlingException extends QuotaExceededException { throwThrottlingException(Type.NumRequestsExceeded, waitInterval); } + public static void throwRequestSizeExceeded(final long waitInterval) + throws ThrottlingException { + throwThrottlingException(Type.RequestSizeExceeded, waitInterval); + } + public static void throwNumReadRequestsExceeded(final long waitInterval) throws ThrottlingException { throwThrottlingException(Type.NumReadRequestsExceeded, waitInterval); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java new file mode 100644 index 00000000000..63fe3349585 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSink.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +/** + * A HBase ReplicationLoad to present MetricsSink information + */ +@InterfaceAudience.Private +public class ReplicationLoadSink { + private long ageOfLastAppliedOp; + private long timeStampsOfLastAppliedOp; + + public ReplicationLoadSink(long age, long timeStamp) { + this.ageOfLastAppliedOp = age; + this.timeStampsOfLastAppliedOp = timeStamp; + } + + public long getAgeOfLastAppliedOp() { + return this.ageOfLastAppliedOp; + } + + public long getTimeStampsOfLastAppliedOp() { + return this.timeStampsOfLastAppliedOp; + } + +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java new file mode 100644 index 00000000000..bfd15990be1 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationLoadSource.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. + */ +package org.apache.hadoop.hbase.replication; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +/** + * A HBase ReplicationLoad to present MetricsSource information + */ +@InterfaceAudience.Private +public class ReplicationLoadSource { + private String peerID; + private long ageOfLastShippedOp; + private int sizeOfLogQueue; + private long timeStampOfLastShippedOp; + private long replicationLag; + + public ReplicationLoadSource(String id, long age, int size, long timeStamp, long lag) { + this.peerID = id; + this.ageOfLastShippedOp = age; + this.sizeOfLogQueue = size; + this.timeStampOfLastShippedOp = timeStamp; + this.replicationLag = lag; + } + + public String getPeerID() { + return this.peerID; + } + + public long getAgeOfLastShippedOp() { + return this.ageOfLastShippedOp; + } + + public long getSizeOfLogQueue() { + return this.sizeOfLogQueue; + } + + public long getTimeStampOfLastShippedOp() { + return this.timeStampOfLastShippedOp; + } + + public long getReplicationLag() { + return this.replicationLag; + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java index f4bc3e9c681..c787efe1623 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java @@ -63,8 +63,7 @@ public class EncryptionUtil { /** * Protect a key by encrypting it with the secret key of the given subject. - * The configuration must be set up correctly for key alias resolution. Keys - * are always wrapped using AES. 
+ * The configuration must be set up correctly for key alias resolution. * @param conf configuration * @param subject subject key alias * @param key the key @@ -72,10 +71,12 @@ public class EncryptionUtil { */ public static byte[] wrapKey(Configuration conf, String subject, Key key) throws IOException { - // Wrap the key with AES - Cipher cipher = Encryption.getCipher(conf, "AES"); + // Wrap the key with the configured encryption algorithm. + String algorithm = + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + Cipher cipher = Encryption.getCipher(conf, algorithm); if (cipher == null) { - throw new RuntimeException("Cipher 'AES' not available"); + throw new RuntimeException("Cipher '" + algorithm + "' not available"); } EncryptionProtos.WrappedKey.Builder builder = EncryptionProtos.WrappedKey.newBuilder(); builder.setAlgorithm(key.getAlgorithm()); @@ -100,8 +101,7 @@ public class EncryptionUtil { /** * Unwrap a key by decrypting it with the secret key of the given subject. - * The configuration must be set up correctly for key alias resolution. Keys - * are always unwrapped using AES. + * The configuration must be set up correctly for key alias resolution. * @param conf configuration * @param subject subject key alias * @param value the encrypted key bytes @@ -113,10 +113,17 @@ public class EncryptionUtil { throws IOException, KeyException { EncryptionProtos.WrappedKey wrappedKey = EncryptionProtos.WrappedKey.PARSER .parseDelimitedFrom(new ByteArrayInputStream(value)); - Cipher cipher = Encryption.getCipher(conf, "AES"); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, + HConstants.CIPHER_AES); + Cipher cipher = Encryption.getCipher(conf, algorithm); if (cipher == null) { - throw new RuntimeException("Algorithm 'AES' not available"); + throw new RuntimeException("Cipher '" + algorithm + "' not available"); } + return getUnwrapKey(conf, subject, wrappedKey, cipher); + } + + private static Key getUnwrapKey(Configuration conf, String subject, + EncryptionProtos.WrappedKey wrappedKey, Cipher cipher) throws IOException, KeyException { ByteArrayOutputStream out = new ByteArrayOutputStream(); byte[] iv = wrappedKey.hasIv() ? wrappedKey.getIv().toByteArray() : null; Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), @@ -130,4 +137,26 @@ public class EncryptionUtil { return new SecretKeySpec(keyBytes, wrappedKey.getAlgorithm()); } + /** + * Unwrap a wal key by decrypting it with the secret key of the given subject. The configuration + * must be set up correctly for key alias resolution. 
+ * @param conf configuration + * @param subject subject key alias + * @param value the encrypted key bytes + * @return the raw key bytes + * @throws IOException if key is not found for the subject, or if some I/O error occurs + * @throws KeyException if fail to unwrap the key + */ + public static Key unwrapWALKey(Configuration conf, String subject, byte[] value) + throws IOException, KeyException { + EncryptionProtos.WrappedKey wrappedKey = + EncryptionProtos.WrappedKey.PARSER.parseDelimitedFrom(new ByteArrayInputStream(value)); + String algorithm = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + Cipher cipher = Encryption.getCipher(conf, algorithm); + if (cipher == null) { + throw new RuntimeException("Cipher '" + algorithm + "' not available"); + } + return getUnwrapKey(conf, subject, wrappedKey, cipher); + } + } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslClientHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslClientHandler.java index 50445c144c7..1be59bc7a62 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslClientHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslClientHandler.java @@ -24,10 +24,12 @@ import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelPromise; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; @@ -35,8 +37,10 @@ import javax.security.auth.callback.CallbackHandler; import javax.security.sasl.Sasl; import javax.security.sasl.SaslClient; import javax.security.sasl.SaslException; + import java.io.IOException; import java.nio.charset.Charset; +import java.security.PrivilegedExceptionAction; import java.util.Random; /** @@ -48,6 +52,8 @@ public class SaslClientHandler extends ChannelDuplexHandler { private final boolean fallbackAllowed; + private final UserGroupInformation ticket; + /** * Used for client or server's token to send or receive from each other. 
*/ @@ -63,6 +69,7 @@ public class SaslClientHandler extends ChannelDuplexHandler { /** * Constructor * + * @param ticket the ugi * @param method auth method * @param token for Sasl * @param serverPrincipal Server's Kerberos principal name @@ -72,10 +79,11 @@ public class SaslClientHandler extends ChannelDuplexHandler { * @param successfulConnectHandler handler for succesful connects * @throws java.io.IOException if handler could not be created */ - public SaslClientHandler(AuthMethod method, Token token, - String serverPrincipal, boolean fallbackAllowed, String rpcProtection, - SaslExceptionHandler exceptionHandler, SaslSuccessfulConnectHandler successfulConnectHandler) - throws IOException { + public SaslClientHandler(UserGroupInformation ticket, AuthMethod method, + Token token, String serverPrincipal, boolean fallbackAllowed, + String rpcProtection, SaslExceptionHandler exceptionHandler, + SaslSuccessfulConnectHandler successfulConnectHandler) throws IOException { + this.ticket = ticket; this.fallbackAllowed = fallbackAllowed; this.exceptionHandler = exceptionHandler; @@ -109,8 +117,9 @@ public class SaslClientHandler extends ChannelDuplexHandler { default: throw new IOException("Unknown authentication method " + method); } - if (saslClient == null) + if (saslClient == null) { throw new IOException("Unable to find SASL client implementation"); + } } /** @@ -144,14 +153,26 @@ public class SaslClientHandler extends ChannelDuplexHandler { null); } - @Override public void channelUnregistered(ChannelHandlerContext ctx) throws Exception { + @Override + public void channelUnregistered(ChannelHandlerContext ctx) throws Exception { saslClient.dispose(); } - @Override public void handlerAdded(ChannelHandlerContext ctx) throws Exception { - this.saslToken = new byte[0]; + private byte[] evaluateChallenge(final byte[] challenge) throws Exception { + return ticket.doAs(new PrivilegedExceptionAction() { + + @Override + public byte[] run() throws Exception { + return saslClient.evaluateChallenge(challenge); + } + }); + } + + @Override + public void handlerAdded(final ChannelHandlerContext ctx) throws Exception { + saslToken = new byte[0]; if (saslClient.hasInitialResponse()) { - saslToken = saslClient.evaluateChallenge(saslToken); + saslToken = evaluateChallenge(saslToken); } if (saslToken != null) { writeSaslToken(ctx, saslToken); @@ -161,7 +182,8 @@ public class SaslClientHandler extends ChannelDuplexHandler { } } - @Override public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { ByteBuf in = (ByteBuf) msg; // If not complete, try to negotiate @@ -187,15 +209,17 @@ public class SaslClientHandler extends ChannelDuplexHandler { } } saslToken = new byte[len]; - if (LOG.isDebugEnabled()) + if (LOG.isDebugEnabled()) { LOG.debug("Will read input token of size " + saslToken.length + " for processing by initSASLContext"); + } in.readBytes(saslToken); - saslToken = saslClient.evaluateChallenge(saslToken); + saslToken = evaluateChallenge(saslToken); if (saslToken != null) { - if (LOG.isDebugEnabled()) + if (LOG.isDebugEnabled()) { LOG.debug("Will send token of size " + saslToken.length + " from initSASLContext."); + } writeSaslToken(ctx, saslToken); } } @@ -246,8 +270,7 @@ public class SaslClientHandler extends ChannelDuplexHandler { /** * Write SASL token - * - * @param ctx to write to + * @param ctx to write to * @param saslToken to write */ private void writeSaslToken(final 
ChannelHandlerContext ctx, byte[] saslToken) { @@ -255,7 +278,8 @@ public class SaslClientHandler extends ChannelDuplexHandler { b.writeInt(saslToken.length); b.writeBytes(saslToken, 0, saslToken.length); ctx.writeAndFlush(b).addListener(new ChannelFutureListener() { - @Override public void operationComplete(ChannelFuture future) throws Exception { + @Override + public void operationComplete(ChannelFuture future) throws Exception { if (!future.isSuccess()) { exceptionCaught(ctx, future.cause()); } @@ -289,7 +313,8 @@ public class SaslClientHandler extends ChannelDuplexHandler { exceptionHandler.handle(this.retryCount++, this.random, cause); } - @Override public void write(final ChannelHandlerContext ctx, Object msg, ChannelPromise promise) + @Override + public void write(final ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { // If not complete, try to negotiate if (!saslClient.isComplete()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java index d0eb40d76a1..9a0d1044f7d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java @@ -22,7 +22,6 @@ import java.util.ArrayList; import java.util.List; import java.util.regex.Pattern; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MasterNotRunningException; @@ -33,7 +32,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -60,7 +58,7 @@ public class AccessControlClient { /** * Grants permission on the specified table for the specified user - * @param conf + * @param connection The Connection instance to use * @param tableName * @param userName * @param family @@ -68,66 +66,51 @@ public class AccessControlClient { * @param actions * @throws Throwable */ - public static void grant(Configuration conf, final TableName tableName, + public static void grant(Connection connection, final TableName tableName, final String userName, final byte[] family, final byte[] qual, final Permission.Action... actions) throws Throwable { - // TODO: Make it so caller passes in a Connection rather than have us do this expensive - // setup each time. This class only used in test and shell at moment though. - try (Connection connection = ConnectionFactory.createConnection(conf)) { - try (Table table = connection.getTable(ACL_TABLE_NAME)) { - ProtobufUtil.grant(getAccessControlServiceStub(table), userName, tableName, family, qual, + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.grant(getAccessControlServiceStub(table), userName, tableName, family, qual, actions); - } } } /** * Grants permission on the specified namespace for the specified user. 
- * @param conf + * @param connection The Connection instance to use * @param namespace * @param userName * @param actions * @throws Throwable */ - public static void grant(Configuration conf, final String namespace, + public static void grant(Connection connection, final String namespace, final String userName, final Permission.Action... actions) throws Throwable { - // TODO: Make it so caller passes in a Connection rather than have us do this expensive - // setup each time. This class only used in test and shell at moment though. - try (Connection connection = ConnectionFactory.createConnection(conf)) { - try (Table table = connection.getTable(ACL_TABLE_NAME)) { - ProtobufUtil.grant(getAccessControlServiceStub(table), userName, namespace, actions); - } + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.grant(getAccessControlServiceStub(table), userName, namespace, actions); } } /** + * @param connection The Connection instance to use * Grant global permissions for the specified user. */ - public static void grant(Configuration conf, final String userName, + public static void grant(Connection connection, final String userName, final Permission.Action... actions) throws Throwable { - // TODO: Make it so caller passes in a Connection rather than have us do this expensive - // setup each time. This class only used in test and shell at moment though. - try (Connection connection = ConnectionFactory.createConnection(conf)) { - try (Table table = connection.getTable(ACL_TABLE_NAME)) { - ProtobufUtil.grant(getAccessControlServiceStub(table), userName, actions); - } + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.grant(getAccessControlServiceStub(table), userName, actions); } } - public static boolean isAccessControllerRunning(Configuration conf) + public static boolean isAccessControllerRunning(Connection connection) throws MasterNotRunningException, ZooKeeperConnectionException, IOException { - // TODO: Make it so caller passes in a Connection rather than have us do this expensive - // setup each time. This class only used in test and shell at moment though. - try (Connection connection = ConnectionFactory.createConnection(conf)) { - try (Admin admin = connection.getAdmin()) { - return admin.isTableAvailable(ACL_TABLE_NAME); - } + try (Admin admin = connection.getAdmin()) { + return admin.isTableAvailable(ACL_TABLE_NAME); } } /** * Revokes the permission on the table - * @param conf + * @param connection The Connection instance to use * @param tableName * @param username * @param family @@ -135,81 +118,67 @@ public class AccessControlClient { * @param actions * @throws Throwable */ - public static void revoke(Configuration conf, final TableName tableName, + public static void revoke(Connection connection, final TableName tableName, final String username, final byte[] family, final byte[] qualifier, final Permission.Action... actions) throws Throwable { - // TODO: Make it so caller passes in a Connection rather than have us do this expensive - // setup each time. This class only used in test and shell at moment though. 
- try (Connection connection = ConnectionFactory.createConnection(conf)) { - try (Table table = connection.getTable(ACL_TABLE_NAME)) { - ProtobufUtil.revoke(getAccessControlServiceStub(table), username, tableName, family, + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.revoke(getAccessControlServiceStub(table), username, tableName, family, qualifier, actions); - } } } /** * Revokes the permission on the table for the specified user. - * @param conf + * @param connection The Connection instance to use * @param namespace * @param userName * @param actions * @throws Throwable */ - public static void revoke(Configuration conf, final String namespace, - final String userName, final Permission.Action... actions) throws Throwable { - // TODO: Make it so caller passes in a Connection rather than have us do this expensive - // setup each time. This class only used in test and shell at moment though. - try (Connection connection = ConnectionFactory.createConnection(conf)) { - try (Table table = connection.getTable(ACL_TABLE_NAME)) { - ProtobufUtil.revoke(getAccessControlServiceStub(table), userName, namespace, actions); - } + public static void revoke(Connection connection, final String namespace, + final String userName, final Permission.Action... actions) throws Throwable { + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.revoke(getAccessControlServiceStub(table), userName, namespace, actions); } } /** * Revoke global permissions for the specified user. + * @param connection The Connection instance to use */ - public static void revoke(Configuration conf, final String userName, + public static void revoke(Connection connection, final String userName, final Permission.Action... actions) throws Throwable { - // TODO: Make it so caller passes in a Connection rather than have us do this expensive - // setup each time. This class only used in test and shell at moment though. - try (Connection connection = ConnectionFactory.createConnection(conf)) { - try (Table table = connection.getTable(ACL_TABLE_NAME)) { - ProtobufUtil.revoke(getAccessControlServiceStub(table), userName, actions); - } + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + ProtobufUtil.revoke(getAccessControlServiceStub(table), userName, actions); } + } /** * List all the userPermissions matching the given pattern. - * @param conf + * @param connection The Connection instance to use * @param tableRegex The regular expression string to match against * @return - returns an array of UserPermissions * @throws Throwable */ - public static List getUserPermissions(Configuration conf, String tableRegex) + public static List getUserPermissions(Connection connection, String tableRegex) throws Throwable { List permList = new ArrayList(); - // TODO: Make it so caller passes in a Connection rather than have us do this expensive - // setup each time. This class only used in test and shell at moment though. 
- try (Connection connection = ConnectionFactory.createConnection(conf)) { - try (Table table = connection.getTable(ACL_TABLE_NAME)) { - try (Admin admin = connection.getAdmin()) { - CoprocessorRpcChannel service = table.coprocessorService(HConstants.EMPTY_START_ROW); - BlockingInterface protocol = + try (Table table = connection.getTable(ACL_TABLE_NAME)) { + try (Admin admin = connection.getAdmin()) { + CoprocessorRpcChannel service = table.coprocessorService(HConstants.EMPTY_START_ROW); + BlockingInterface protocol = AccessControlProtos.AccessControlService.newBlockingStub(service); - HTableDescriptor[] htds = null; - if (tableRegex == null || tableRegex.isEmpty()) { - permList = ProtobufUtil.getUserPermissions(protocol); - } else if (tableRegex.charAt(0) == '@') { - String namespace = tableRegex.substring(1); - permList = ProtobufUtil.getUserPermissions(protocol, Bytes.toBytes(namespace)); - } else { - htds = admin.listTables(Pattern.compile(tableRegex), true); - for (HTableDescriptor hd : htds) { - permList.addAll(ProtobufUtil.getUserPermissions(protocol, hd.getTableName())); - } + HTableDescriptor[] htds = null; + if (tableRegex == null || tableRegex.isEmpty()) { + permList = ProtobufUtil.getUserPermissions(protocol); + } else if (tableRegex.charAt(0) == '@') { + String namespace = tableRegex.substring(1); + permList = ProtobufUtil.getUserPermissions(protocol, Bytes.toBytes(namespace)); + } else { + htds = admin.listTables(Pattern.compile(tableRegex), true); + for (HTableDescriptor hd : htds) { + permList.addAll(ProtobufUtil.getUserPermissions(protocol, hd.getTableName())); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java index 2c609d94801..172c89e70fb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotExistsException.java @@ -28,6 +28,9 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptio @InterfaceAudience.Public @InterfaceStability.Evolving public class SnapshotExistsException extends HBaseSnapshotException { + public SnapshotExistsException(String msg) { + super(msg); + } /** * Failure due to the snapshot already existing diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java index 3dc9aa63f14..7cf4b8d8a71 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java @@ -33,6 +33,11 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; /** * Utility methods for reading, and building the ZooKeeper configuration. + * + * The order and priority for reading the config are as follows: + * (1). zoo.cfg if "hbase.config.read.zookeeper.config" is true + * (2). Property with "hbase.zookeeper.property." prefix from HBase XML + * (3). other ZooKeeper related properties in the HBase XML */ @InterfaceAudience.Private public class ZKConfig { @@ -51,6 +56,24 @@ public class ZKConfig { * @return Properties holding mappings representing ZooKeeper config file. 
*/ public static Properties makeZKProps(Configuration conf) { + Properties zkProperties = makeZKPropsFromZooCfg(conf); + + if (zkProperties == null) { + // Otherwise, use the configuration options from HBase's XML files. + zkProperties = makeZKPropsFromHbaseConfig(conf); + } + return zkProperties; + } + + /** + * Parses the corresponding config options from the zoo.cfg file + * and make a Properties object holding the Zookeeper config. + * + * @param conf Configuration to read from. + * @return Properties holding mappings representing the ZooKeeper config file or null if + * the HBASE_CONFIG_READ_ZOOKEEPER_CONFIG is false or the file does not exist. + */ + private static Properties makeZKPropsFromZooCfg(Configuration conf) { if (conf.getBoolean(HConstants.HBASE_CONFIG_READ_ZOOKEEPER_CONFIG, false)) { LOG.warn( "Parsing ZooKeeper's " + HConstants.ZOOKEEPER_CONFIG_NAME + @@ -80,7 +103,18 @@ public class ZKConfig { } } - // Otherwise, use the configuration options from HBase's XML files. + return null; + } + + /** + * Make a Properties object holding ZooKeeper config. + * Parses the corresponding config options from the HBase XML configs + * and generates the appropriate ZooKeeper properties. + * + * @param conf Configuration to read from. + * @return Properties holding mappings representing ZooKeeper config file. + */ + private static Properties makeZKPropsFromHbaseConfig(Configuration conf) { Properties zkProperties = new Properties(); // Directly map all of the hbase.zookeeper.property.KEY properties. @@ -112,10 +146,17 @@ public class ZKConfig { final String[] serverHosts = conf.getStrings(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST); + String serverHost; + String address; + String key; for (int i = 0; i < serverHosts.length; ++i) { - String serverHost = serverHosts[i]; - String address = serverHost + ":" + peerPort + ":" + leaderPort; - String key = "server." + i; + if (serverHosts[i].contains(":")) { + serverHost = serverHosts[i].substring(0, serverHosts[i].indexOf(':')); + } else { + serverHost = serverHosts[i]; + } + address = serverHost + ":" + peerPort + ":" + leaderPort; + key = "server." + i; zkProperties.put(key, address); } @@ -177,7 +218,8 @@ public class ZKConfig { } // Special case for 'hbase.cluster.distributed' property being 'true' if (key.startsWith("server.")) { - boolean mode = conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, HConstants.DEFAULT_CLUSTER_DISTRIBUTED); + boolean mode = + conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, HConstants.DEFAULT_CLUSTER_DISTRIBUTED); if (mode == HConstants.CLUSTER_IS_DISTRIBUTED && value.startsWith(HConstants.LOCALHOST)) { String msg = "The server in zoo.cfg cannot be set to localhost " + "in a fully-distributed setup because it won't be reachable. 
" + @@ -198,7 +240,7 @@ public class ZKConfig { * @param properties * @return Quorum servers String */ - public static String getZKQuorumServersString(Properties properties) { + private static String getZKQuorumServersString(Properties properties) { String clientPort = null; List servers = new ArrayList(); @@ -250,12 +292,59 @@ public class ZKConfig { return hostPortBuilder.toString(); } + /** + * Return the ZK Quorum servers string given the specified configuration + * + * @param conf + * @return Quorum servers String + */ + private static String getZKQuorumServersStringFromHbaseConfig(Configuration conf) { + String defaultClientPort = Integer.toString( + conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEPER_CLIENT_PORT)); + + // Build the ZK quorum server string with "server:clientport" list, separated by ',' + final String[] serverHosts = + conf.getStrings(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST); + return buildQuorumServerString(serverHosts, defaultClientPort); + } + + /** + * Build the ZK quorum server string with "server:clientport" list, separated by ',' + * + * @param serverHosts a list of servers for ZK quorum + * @param clientPort the default client port + * @return the string for a list of "server:port" separated by "," + */ + public static String buildQuorumServerString(String[] serverHosts, String clientPort) { + StringBuilder quorumStringBuilder = new StringBuilder(); + String serverHost; + for (int i = 0; i < serverHosts.length; ++i) { + if (serverHosts[i].contains(":")) { + serverHost = serverHosts[i]; // just use the port specified from the input + } else { + serverHost = serverHosts[i] + ":" + clientPort; + } + if (i > 0) { + quorumStringBuilder.append(','); + } + quorumStringBuilder.append(serverHost); + } + return quorumStringBuilder.toString(); + } + /** * Return the ZK Quorum servers string given the specified configuration. * @param conf * @return Quorum servers */ public static String getZKQuorumServersString(Configuration conf) { - return getZKQuorumServersString(makeZKProps(conf)); + // First try zoo.cfg; if not applicable, then try config XML. 
+ Properties zkProperties = makeZKPropsFromZooCfg(conf); + + if (zkProperties != null) { + return getZKQuorumServersString(zkProperties); + } + + return getZKQuorumServersStringFromHbaseConfig(conf); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java index 84e7f577aa4..413bc982d08 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -31,7 +31,6 @@ import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.Properties; import javax.security.auth.login.AppConfigurationEntry; import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag; @@ -46,8 +45,9 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; @@ -74,6 +74,7 @@ import org.apache.zookeeper.proto.DeleteRequest; import org.apache.zookeeper.proto.SetDataRequest; import org.apache.zookeeper.server.ZooKeeperSaslServer; +import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.InvalidProtocolBufferException; /** @@ -93,6 +94,25 @@ public class ZKUtil { public static final char ZNODE_PATH_SEPARATOR = '/'; private static int zkDumpConnectionTimeOut; + // The Quorum for the ZK cluster can have one of the following formats (see examples below): + // (1). s1,s2,s3 (no client port in the list, the client port could be obtained from clientPort) + // (2). s1:p1,s2:p2,s3:p3 (with client port, which could be same or different for each server, + // in this case, the clientPort would be ignored) + // (3). s1:p1,s2,s3:p3 (mix of (1) and (2) - if port is not specified in a server, it would use + // the clientPort; otherwise, it would use the specified port) + @VisibleForTesting + public static class ZKClusterKey { + public String quorumString; + public int clientPort; + public String znodeParent; + + ZKClusterKey(String quorumString, int clientPort, String znodeParent) { + this.quorumString = quorumString; + this.clientPort = clientPort; + this.znodeParent = znodeParent; + } + } + /** * Creates a new connection to ZooKeeper, pulling settings and ensemble config * from the specified configuration object using methods from {@link ZKConfig}. 
@@ -106,8 +126,7 @@ public class ZKUtil { */ public static RecoverableZooKeeper connect(Configuration conf, Watcher watcher) throws IOException { - Properties properties = ZKConfig.makeZKProps(conf); - String ensemble = ZKConfig.getZKQuorumServersString(properties); + String ensemble = ZKConfig.getZKQuorumServersString(conf); return connect(conf, ensemble, watcher); } @@ -381,10 +400,10 @@ public class ZKUtil { */ public static void applyClusterKeyToConf(Configuration conf, String key) throws IOException{ - String[] parts = transformClusterKey(key); - conf.set(HConstants.ZOOKEEPER_QUORUM, parts[0]); - conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, parts[1]); - conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parts[2]); + ZKClusterKey zkClusterKey = transformClusterKey(key); + conf.set(HConstants.ZOOKEEPER_QUORUM, zkClusterKey.quorumString); + conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClusterKey.clientPort); + conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, zkClusterKey.znodeParent); } /** @@ -395,14 +414,53 @@ public class ZKUtil { * @return the three configuration in the described order * @throws IOException */ - public static String[] transformClusterKey(String key) throws IOException { + public static ZKClusterKey transformClusterKey(String key) throws IOException { String[] parts = key.split(":"); - if (parts.length != 3) { - throw new IOException("Cluster key passed " + key + " is invalid, the format should be:" + - HConstants.ZOOKEEPER_QUORUM + ":hbase.zookeeper.client.port:" - + HConstants.ZOOKEEPER_ZNODE_PARENT); + + if (parts.length == 3) { + return new ZKClusterKey(parts [0], Integer.parseInt(parts [1]), parts [2]); } - return parts; + + if (parts.length > 3) { + // The quorum could contain client port in server:clientport format, try to transform more. + String zNodeParent = parts [parts.length - 1]; + String clientPort = parts [parts.length - 2]; + + // The first part length is the total length minus the lengths of other parts and minus 2 ":" + int endQuorumIndex = key.length() - zNodeParent.length() - clientPort.length() - 2; + String quorumStringInput = key.substring(0, endQuorumIndex); + String[] serverHosts = quorumStringInput.split(","); + + // The common case is that every server has its own client port specified - this means + // that (total parts - the ZNodeParent part - the ClientPort part) is equal to + // (the number of "," + 1) - "+ 1" because the last server has no ",". + if ((parts.length - 2) == (serverHosts.length + 1)) { + return new ZKClusterKey(quorumStringInput, Integer.parseInt(clientPort), zNodeParent); + } + + // For the uncommon case that some servers has no port specified, we need to build the + // server:clientport list using default client port for servers without specified port. 
+ return new ZKClusterKey( + ZKConfig.buildQuorumServerString(serverHosts, clientPort), + Integer.parseInt(clientPort), + zNodeParent); + } + + throw new IOException("Cluster key passed " + key + " is invalid, the format should be:" + + HConstants.ZOOKEEPER_QUORUM + ":" + HConstants.ZOOKEEPER_CLIENT_PORT + ":" + + HConstants.ZOOKEEPER_ZNODE_PARENT); + } + + /** + * Standardize the ZK quorum string: make it a "server:clientport" list, separated by ',' + * @param quorumStringInput a string contains a list of servers for ZK quorum + * @param clientPort the default client port + * @return the string for a list of "server:port" separated by "," + */ + @VisibleForTesting + public static String standardizeQuorumServerString(String quorumStringInput, String clientPort) { + String[] serverHosts = quorumStringInput.split(","); + return ZKConfig.buildQuorumServerString(serverHosts, clientPort); } // @@ -939,7 +997,8 @@ public class ZKUtil { // Detection for embedded HBase client with jaas configuration // defined for third party programs. try { - javax.security.auth.login.Configuration testConfig = javax.security.auth.login.Configuration.getConfiguration(); + javax.security.auth.login.Configuration testConfig = + javax.security.auth.login.Configuration.getConfiguration(); if(testConfig.getAppConfigurationEntry("Client") == null) { return false; } @@ -1207,7 +1266,6 @@ public class ZKUtil { } catch (InterruptedException ie) { zkw.interruptedException(ie); } - } catch(InterruptedException ie) { zkw.interruptedException(ie); } @@ -1334,8 +1392,8 @@ public class ZKUtil { deleteNodeRecursively(zkw, joinZNode(node, child)); } } - //Zookeeper Watches are one time triggers; When children of parent nodes are deleted recursively. - //Must set another watch, get notified of delete node + //Zookeeper Watches are one time triggers; When children of parent nodes are deleted + //recursively, must set another watch, get notified of delete node if (zkw.getRecoverableZooKeeper().exists(node, zkw) != null){ zkw.getRecoverableZooKeeper().delete(node, -1); } @@ -1986,7 +2044,8 @@ public class ZKUtil { * @see #logZKTree(ZooKeeperWatcher, String) * @throws KeeperException if an unexpected exception occurs */ - protected static void logZKTree(ZooKeeperWatcher zkw, String root, String prefix) throws KeeperException { + protected static void logZKTree(ZooKeeperWatcher zkw, String root, String prefix) + throws KeeperException { List children = ZKUtil.listChildrenNoWatch(zkw, root); if (children == null) return; for (String child : children) { @@ -2044,10 +2103,10 @@ public class ZKUtil { */ public static byte[] regionSequenceIdsToByteArray(final Long regionLastFlushedSequenceId, final Map storeSequenceIds) { - ZooKeeperProtos.RegionStoreSequenceIds.Builder regionSequenceIdsBuilder = - ZooKeeperProtos.RegionStoreSequenceIds.newBuilder(); - ZooKeeperProtos.StoreSequenceId.Builder storeSequenceIdBuilder = - ZooKeeperProtos.StoreSequenceId.newBuilder(); + ClusterStatusProtos.RegionStoreSequenceIds.Builder regionSequenceIdsBuilder = + ClusterStatusProtos.RegionStoreSequenceIds.newBuilder(); + ClusterStatusProtos.StoreSequenceId.Builder storeSequenceIdBuilder = + ClusterStatusProtos.StoreSequenceId.newBuilder(); if (storeSequenceIds != null) { for (Map.Entry e : storeSequenceIds.entrySet()){ byte[] columnFamilyName = e.getKey(); @@ -2074,7 +2133,7 @@ public class ZKUtil { throw new DeserializationException("Unable to parse RegionStoreSequenceIds."); } RegionStoreSequenceIds.Builder regionSequenceIdsBuilder = - 
ZooKeeperProtos.RegionStoreSequenceIds.newBuilder(); + ClusterStatusProtos.RegionStoreSequenceIds.newBuilder(); int pblen = ProtobufUtil.lengthOfPBMagic(); RegionStoreSequenceIds storeIds = null; try { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java new file mode 100644 index 00000000000..f5a41df9f68 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java @@ -0,0 +1,110 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.zookeeper; + +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; +import org.apache.zookeeper.ZooDefs; +import org.apache.zookeeper.ZooKeeper; +import org.apache.zookeeper.Watcher; +import org.apache.zookeeper.WatchedEvent; + +/** + * You may add the jaas.conf option + * -Djava.security.auth.login.config=/PATH/jaas.conf + * + * You may also specify -D to set options + * "hbase.zookeeper.quorum" (it should be in hbase-site.xml) + * "zookeeper.znode.parent" (it should be in hbase-site.xml) + */ +@InterfaceAudience.Private +public class ZkAclReset extends Configured implements Tool { + private static final Log LOG = LogFactory.getLog(ZkAclReset.class); + + private static final int ZK_SESSION_TIMEOUT_DEFAULT = 5 * 1000; + + private static class ZkWatcher implements Watcher { + public ZkWatcher() { + } + + @Override + public void process(WatchedEvent event) { + LOG.info("Received ZooKeeper Event, " + + "type=" + event.getType() + ", " + + "state=" + event.getState() + ", " + + "path=" + event.getPath()); + } + } + + private static void resetAcls(final ZooKeeper zk, final String znode) + throws Exception { + List children = zk.getChildren(znode, false); + if (children != null) { + for (String child: children) { + resetAcls(zk, znode + '/' + child); + } + } + LOG.info(" - reset acl for " + znode); + zk.setACL(znode, ZooDefs.Ids.OPEN_ACL_UNSAFE, -1); + } + + private static void resetAcls(final String quorumServers, final int zkTimeout, final String znode) + throws Exception { + ZooKeeper zk = new ZooKeeper(quorumServers, zkTimeout, new ZkWatcher()); + try { + resetAcls(zk, znode); + } finally { + zk.close(); + } + } + + private void resetHBaseAcls(final Configuration conf) throws Exception { + String quorumServers = conf.get("hbase.zookeeper.quorum", HConstants.LOCALHOST); + int sessionTimeout = 
conf.getInt("zookeeper.session.timeout", ZK_SESSION_TIMEOUT_DEFAULT); + String znode = conf.get("zookeeper.znode.parent", HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + if (quorumServers == null) { + LOG.error("Unable to load hbase.zookeeper.quorum (try with: -conf hbase-site.xml)"); + return; + } + + LOG.info("Reset HBase ACLs for " + quorumServers + " " + znode); + resetAcls(quorumServers, sessionTimeout, znode); + } + + + @Override + public int run(String[] args) throws Exception { + Configuration conf = getConf(); + resetHBaseAcls(conf); + return(0); + } + + public static void main(String[] args) throws Exception { + System.exit(ToolRunner.run(new Configuration(), new ZkAclReset(), args)); + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java index 575dcdfde1d..b428d98d358 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java @@ -370,6 +370,13 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { return quorum; } + /** + * @return the base znode of this zookeeper connection instance. + */ + public String getBaseZNode() { + return baseZNode; + } + /** * Method called from ZooKeeper for events and connection status. *

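Illustration of the quorum/cluster-key handling introduced above (a minimal sketch, not part of the patch; host names s1/s2/s3, the ports and the /hbase parent are made up). ZKUtil.transformClusterKey now returns a ZKClusterKey and tolerates per-server client ports in the quorum part, and ZKConfig.buildQuorumServerString fills in a default client port for servers that omit one:

  import java.io.IOException;

  import org.apache.hadoop.hbase.zookeeper.ZKConfig;
  import org.apache.hadoop.hbase.zookeeper.ZKUtil;

  public class ClusterKeyParsingSketch {
    public static void main(String[] args) throws IOException {
      // Format (1): no per-server ports; 2181 is the shared client port.
      ZKUtil.ZKClusterKey plain = ZKUtil.transformClusterKey("s1,s2,s3:2181:/hbase");
      System.out.println(plain.quorumString + " " + plain.clientPort + " " + plain.znodeParent);
      // -> s1,s2,s3 2181 /hbase

      // Format (2): every server carries its own port; the quorum part is kept as-is.
      ZKUtil.ZKClusterKey perServer =
          ZKUtil.transformClusterKey("s1:2281,s2:2282,s3:2283:2181:/hbase");
      System.out.println(perServer.quorumString);
      // -> s1:2281,s2:2282,s3:2283

      // Format (3): a mix; servers without an explicit port get the default client port.
      String[] hosts = { "s1:2281", "s2", "s3:2283" };
      System.out.println(ZKConfig.buildQuorumServerString(hosts, "2181"));
      // -> s1:2281,s2:2181,s3:2283
    }
  }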
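Similarly, for the Connection-based AccessControlClient methods earlier in this patch: the caller now creates and owns the Connection instead of passing a Configuration and paying for a new connection on every call. A usage sketch under that assumption (table "t1" and user "bob" are made up; the List element type is assumed to be UserPermission):

  import java.util.List;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.security.access.AccessControlClient;
  import org.apache.hadoop.hbase.security.access.Permission;
  import org.apache.hadoop.hbase.security.access.UserPermission;

  public class AccessControlClientUsage {
    public static void main(String[] args) throws Throwable {
      Configuration conf = HBaseConfiguration.create();
      // The caller owns the Connection and can reuse it across ACL calls.
      try (Connection connection = ConnectionFactory.createConnection(conf)) {
        // Grant table-level READ to "bob" (null family/qualifier = whole table).
        AccessControlClient.grant(connection, TableName.valueOf("t1"), "bob", null, null,
            Permission.Action.READ);
        // List permissions for tables matching a regex.
        List<UserPermission> perms = AccessControlClient.getUserPermissions(connection, "t1");
        System.out.println(perms);
        // Revoke what was granted above.
        AccessControlClient.revoke(connection, TableName.valueOf("t1"), "bob", null, null,
            Permission.Action.READ);
      }
    }
  }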
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java index 7331b4d676c..dddfb82f244 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; @@ -41,6 +42,9 @@ public class TestRegionLocations { HRegionInfo info2 = hri(2); HRegionInfo info9 = hri(9); + long regionId1 = 1000; + long regionId2 = 2000; + @Test public void testSizeMethods() { RegionLocations list = new RegionLocations(); @@ -72,10 +76,13 @@ public class TestRegionLocations { } private HRegionInfo hri(int replicaId) { + return hri(regionId1, replicaId); + } + + private HRegionInfo hri(long regionId, int replicaId) { TableName table = TableName.valueOf("table"); byte[] startKey = HConstants.EMPTY_START_ROW; byte[] endKey = HConstants.EMPTY_END_ROW; - long regionId = System.currentTimeMillis(); HRegionInfo info = new HRegionInfo(table, startKey, endKey, false, regionId, replicaId); return info; } @@ -276,6 +283,54 @@ public class TestRegionLocations { assertEquals(sn3, list1.getRegionLocation(9).getServerName()); } + @Test + public void testMergeLocationsWithDifferentRegionId() { + RegionLocations list1, list2; + + // test merging two lists. But the list2 contains region replicas with a different region id + HRegionInfo info0 = hri(regionId1, 0); + HRegionInfo info1 = hri(regionId1, 1); + HRegionInfo info2 = hri(regionId2, 2); + + list1 = hrll(hrl(info2, sn1)); + list2 = hrll(hrl(info0, sn2), hrl(info1, sn2)); + list1 = list2.mergeLocations(list1); + assertNull(list1.getRegionLocation(0)); + assertNull(list1.getRegionLocation(1)); + assertNotNull(list1.getRegionLocation(2)); + assertEquals(sn1, list1.getRegionLocation(2).getServerName()); + assertEquals(3, list1.size()); + + // try the other way merge + list1 = hrll(hrl(info2, sn1)); + list2 = hrll(hrl(info0, sn2), hrl(info1, sn2)); + list2 = list1.mergeLocations(list2); + assertNotNull(list2.getRegionLocation(0)); + assertNotNull(list2.getRegionLocation(1)); + assertNull(list2.getRegionLocation(2)); + } + + @Test + public void testUpdateLocationWithDifferentRegionId() { + RegionLocations list; + + HRegionInfo info0 = hri(regionId1, 0); + HRegionInfo info1 = hri(regionId2, 1); + HRegionInfo info2 = hri(regionId1, 2); + + list = new RegionLocations(hrl(info0, sn1), hrl(info2, sn1)); + + list = list.updateLocation(hrl(info1, sn2), false, true); // force update + + // the other locations should be removed now + assertNull(list.getRegionLocation(0)); + assertNotNull(list.getRegionLocation(1)); + assertNull(list.getRegionLocation(2)); + assertEquals(sn2, list.getRegionLocation(1).getServerName()); + assertEquals(3, list.size()); + } + + @Test public void testConstructWithNullElements() { // RegionLocations can contain null elements as well. 
These null elements can diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java index aa41939ce3c..52d66d9b405 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java @@ -338,7 +338,7 @@ public class TestAsyncProcess { /** * Returns our async process. */ - static class MyConnectionImpl extends ConnectionManager.HConnectionImplementation { + static class MyConnectionImpl extends ConnectionImplementation { final AtomicInteger nbThreads = new AtomicInteger(0); @@ -798,7 +798,7 @@ public class TestAsyncProcess { ClusterConnection conn = new MyConnectionImpl(configuration); BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, null, null, new BufferedMutatorParams(DUMMY_TABLE)); - configuration.setBoolean(ConnectionManager.RETRIES_BY_SERVER_KEY, true); + configuration.setBoolean(ConnectionImplementation.RETRIES_BY_SERVER_KEY, true); MyAsyncProcess ap = new MyAsyncProcess(conn, configuration, true); mutator.ap = ap; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java index 3a902d01d3e..275adc02276 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java @@ -23,13 +23,17 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.backoff.ExponentialClientBackoffPolicy; import org.apache.hadoop.hbase.client.backoff.ServerStatistics; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Test; +import org.junit.experimental.categories.Category; import org.mockito.Mockito; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +@Category({ClientTests.class, SmallTests.class}) public class TestClientExponentialBackoff { ServerName server = Mockito.mock(ServerName.class); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java index d155fd7a0af..f085aceaab1 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.RegionTooBusyException; import org.apache.hadoop.hbase.ServerName; @@ -102,7 +103,7 @@ public class TestClientNoCluster extends Configured implements Tool { @Before public void setUp() throws Exception { this.conf = HBaseConfiguration.create(); - // Run my HConnection overrides. Use my little HConnectionImplementation below which + // Run my HConnection overrides. 
Use my little ConnectionImplementation below which // allows me insert mocks and also use my Registry below rather than the default zk based // one so tests run faster and don't have zk dependency. this.conf.set("hbase.client.registry.impl", SimpleRegistry.class.getName()); @@ -205,11 +206,11 @@ public class TestClientNoCluster extends Configured implements Tool { } @Test - public void testDoNotRetryMetaScanner() throws IOException { + public void testDoNotRetryMetaTableAccessor() throws IOException { this.conf.set("hbase.client.connection.impl", RegionServerStoppedOnScannerOpenConnection.class.getName()); try (Connection connection = ConnectionFactory.createConnection(conf)) { - MetaScanner.metaScan(connection, null); + MetaTableAccessor.fullScanRegions(connection); } } @@ -257,48 +258,16 @@ public class TestClientNoCluster extends Configured implements Tool { } } - /** - * Override to shutdown going to zookeeper for cluster id and meta location. - */ - static class ScanOpenNextThenExceptionThenRecoverConnection - extends ConnectionManager.HConnectionImplementation { - final ClientService.BlockingInterface stub; - - ScanOpenNextThenExceptionThenRecoverConnection(Configuration conf, - boolean managed, ExecutorService pool) throws IOException { - super(conf, managed); - // Mock up my stub so open scanner returns a scanner id and then on next, we throw - // exceptions for three times and then after that, we return no more to scan. - this.stub = Mockito.mock(ClientService.BlockingInterface.class); - long sid = 12345L; - try { - Mockito.when(stub.scan((RpcController)Mockito.any(), - (ClientProtos.ScanRequest)Mockito.any())). - thenReturn(ClientProtos.ScanResponse.newBuilder().setScannerId(sid).build()). - thenThrow(new ServiceException(new RegionServerStoppedException("From Mockito"))). - thenReturn(ClientProtos.ScanResponse.newBuilder().setScannerId(sid). - setMoreResults(false).build()); - } catch (ServiceException e) { - throw new IOException(e); - } - } - - @Override - public BlockingInterface getClient(ServerName sn) throws IOException { - return this.stub; - } - } - /** * Override to shutdown going to zookeeper for cluster id and meta location. */ static class RegionServerStoppedOnScannerOpenConnection - extends ConnectionManager.HConnectionImplementation { + extends ConnectionImplementation { final ClientService.BlockingInterface stub; - RegionServerStoppedOnScannerOpenConnection(Configuration conf, boolean managed, + RegionServerStoppedOnScannerOpenConnection(Configuration conf, ExecutorService pool, User user) throws IOException { - super(conf, managed); + super(conf, pool, user); // Mock up my stub so open scanner returns a scanner id and then on next, we throw // exceptions for three times and then after that, we return no more to scan. this.stub = Mockito.mock(ClientService.BlockingInterface.class); @@ -325,12 +294,12 @@ public class TestClientNoCluster extends Configured implements Tool { * Override to check we are setting rpc timeout right. 
*/ static class RpcTimeoutConnection - extends ConnectionManager.HConnectionImplementation { + extends ConnectionImplementation { final ClientService.BlockingInterface stub; - RpcTimeoutConnection(Configuration conf, boolean managed, ExecutorService pool, User user) + RpcTimeoutConnection(Configuration conf, ExecutorService pool, User user) throws IOException { - super(conf, managed); + super(conf, pool, user); // Mock up my stub so an exists call -- which turns into a get -- throws an exception this.stub = Mockito.mock(ClientService.BlockingInterface.class); try { @@ -352,7 +321,7 @@ public class TestClientNoCluster extends Configured implements Tool { * Fake many regionservers and many regions on a connection implementation. */ static class ManyServersManyRegionsConnection - extends ConnectionManager.HConnectionImplementation { + extends ConnectionImplementation { // All access should be synchronized final Map serversByClient; @@ -363,10 +332,10 @@ public class TestClientNoCluster extends Configured implements Tool { final AtomicLong sequenceids = new AtomicLong(0); private final Configuration conf; - ManyServersManyRegionsConnection(Configuration conf, boolean managed, + ManyServersManyRegionsConnection(Configuration conf, ExecutorService pool, User user) throws IOException { - super(conf, managed, pool, user); + super(conf, pool, user); int serverCount = conf.getInt("hbase.test.servers", 10); this.serversByClient = new HashMap(serverCount); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java new file mode 100644 index 00000000000..a91def38b91 --- /dev/null +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java @@ -0,0 +1,489 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValue.Type; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.InOrder; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +/** + * Test the ClientScanner. + */ +@Category(SmallTests.class) +public class TestClientScanner { + + Scan scan; + ExecutorService pool; + Configuration conf; + + ClusterConnection clusterConn; + RpcRetryingCallerFactory rpcFactory; + RpcControllerFactory controllerFactory; + + @Before + @SuppressWarnings("deprecation") + public void setup() throws IOException { + clusterConn = Mockito.mock(ClusterConnection.class); + rpcFactory = Mockito.mock(RpcRetryingCallerFactory.class); + controllerFactory = Mockito.mock(RpcControllerFactory.class); + pool = Executors.newSingleThreadExecutor(); + scan = new Scan(); + conf = new Configuration(); + Mockito.when(clusterConn.getConfiguration()).thenReturn(conf); + } + + @After + public void teardown() { + if (null != pool) { + pool.shutdownNow(); + } + } + + private static class MockClientScanner extends ClientScanner { + + private boolean rpcFinished = false; + private boolean rpcFinishedFired = false; + + public MockClientScanner(final Configuration conf, final Scan scan, final TableName tableName, + ClusterConnection connection, RpcRetryingCallerFactory rpcFactory, + RpcControllerFactory controllerFactory, ExecutorService pool, int primaryOperationTimeout) + throws IOException { + super(conf, scan, tableName, connection, rpcFactory, controllerFactory, pool, + primaryOperationTimeout); + } + + @Override + protected boolean nextScanner(int nbRows, final boolean done) throws IOException { + if (!rpcFinished) { + return super.nextScanner(nbRows, done); + } + + // Enforce that we don't short-circuit more than once + if (rpcFinishedFired) { + throw new RuntimeException("Expected nextScanner to only be called once after " + + " short-circuit was triggered."); + } + rpcFinishedFired = true; + return false; + } + + @Override + protected ScannerCallableWithReplicas getScannerCallable(byte [] localStartKey, + int nbRows) { + scan.setStartRow(localStartKey); + ScannerCallable s = + new ScannerCallable(getConnection(), getTable(), scan, this.scanMetrics, + this.rpcControllerFactory); + s.setCaching(nbRows); + ScannerCallableWithReplicas sr = new ScannerCallableWithReplicas(getTable(), getConnection(), + s, pool, primaryOperationTimeout, scan, + getRetries(), scannerTimeout, caching, conf, caller); + return sr; + } + + public void setRpcFinished(boolean rpcFinished) { + this.rpcFinished = rpcFinished; + } + } + + @Test + @SuppressWarnings("unchecked") + public void testNoResultsHint() throws IOException { + final Result[] results = new Result[1]; + KeyValue kv1 = new 
KeyValue("row".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum); + results[0] = Result.create(new Cell[] {kv1}); + + RpcRetryingCaller caller = Mockito.mock(RpcRetryingCaller.class); + + Mockito.when(rpcFactory. newCaller()).thenReturn(caller); + Mockito.when(caller.callWithoutRetries(Mockito.any(RetryingCallable.class), + Mockito.anyInt())).thenAnswer(new Answer() { + private int count = 0; + @Override + public Result[] answer(InvocationOnMock invocation) throws Throwable { + ScannerCallableWithReplicas callable = invocation.getArgumentAt(0, + ScannerCallableWithReplicas.class); + switch (count) { + case 0: // initialize + case 2: // close + count++; + return null; + case 1: + count++; + callable.setHasMoreResultsContext(false); + return results; + default: + throw new RuntimeException("Expected only 2 invocations"); + } + } + }); + + // Set a much larger cache and buffer size than we'll provide + scan.setCaching(100); + scan.setMaxResultSize(1000*1000); + + try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf("table"), + clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + + scanner.setRpcFinished(true); + + InOrder inOrder = Mockito.inOrder(caller); + + scanner.loadCache(); + + // One more call due to initializeScannerInConstruction() + inOrder.verify(caller, Mockito.times(2)).callWithoutRetries( + Mockito.any(RetryingCallable.class), Mockito.anyInt()); + + assertEquals(1, scanner.cache.size()); + Result r = scanner.cache.poll(); + assertNotNull(r); + CellScanner cs = r.cellScanner(); + assertTrue(cs.advance()); + assertEquals(kv1, cs.current()); + assertFalse(cs.advance()); + } + } + + @Test + @SuppressWarnings("unchecked") + public void testSizeLimit() throws IOException { + final Result[] results = new Result[1]; + KeyValue kv1 = new KeyValue("row".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum); + results[0] = Result.create(new Cell[] {kv1}); + + RpcRetryingCaller caller = Mockito.mock(RpcRetryingCaller.class); + + Mockito.when(rpcFactory. newCaller()).thenReturn(caller); + Mockito.when(caller.callWithoutRetries(Mockito.any(RetryingCallable.class), + Mockito.anyInt())).thenAnswer(new Answer() { + private int count = 0; + @Override + public Result[] answer(InvocationOnMock invocation) throws Throwable { + ScannerCallableWithReplicas callable = invocation.getArgumentAt(0, + ScannerCallableWithReplicas.class); + switch (count) { + case 0: // initialize + case 2: // close + count++; + return null; + case 1: + count++; + callable.setHasMoreResultsContext(true); + callable.setServerHasMoreResults(false); + return results; + default: + throw new RuntimeException("Expected only 2 invocations"); + } + } + }); + + Mockito.when(rpcFactory. 
newCaller()).thenReturn(caller); + + // Set a much larger cache + scan.setCaching(100); + // The single key-value will exit the loop + scan.setMaxResultSize(1); + + try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf("table"), + clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + + // Due to initializeScannerInConstruction() + Mockito.verify(caller).callWithoutRetries(Mockito.any(RetryingCallable.class), + Mockito.anyInt()); + + InOrder inOrder = Mockito.inOrder(caller); + + scanner.loadCache(); + + inOrder.verify(caller, Mockito.times(2)).callWithoutRetries( + Mockito.any(RetryingCallable.class), Mockito.anyInt()); + + assertEquals(1, scanner.cache.size()); + Result r = scanner.cache.poll(); + assertNotNull(r); + CellScanner cs = r.cellScanner(); + assertTrue(cs.advance()); + assertEquals(kv1, cs.current()); + assertFalse(cs.advance()); + } + } + + @Test + @SuppressWarnings("unchecked") + public void testCacheLimit() throws IOException { + KeyValue kv1 = new KeyValue("row1".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum), kv2 = new KeyValue("row2".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum), kv3 = new KeyValue("row3".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum); + final Result[] results = new Result[] {Result.create(new Cell[] {kv1}), + Result.create(new Cell[] {kv2}), Result.create(new Cell[] {kv3})}; + + RpcRetryingCaller caller = Mockito.mock(RpcRetryingCaller.class); + + Mockito.when(rpcFactory. newCaller()).thenReturn(caller); + Mockito.when(caller.callWithoutRetries(Mockito.any(RetryingCallable.class), + Mockito.anyInt())).thenAnswer(new Answer() { + private int count = 0; + @Override + public Result[] answer(InvocationOnMock invocation) throws Throwable { + ScannerCallableWithReplicas callable = invocation.getArgumentAt(0, + ScannerCallableWithReplicas.class); + switch (count) { + case 0: // initialize + case 2: // close + count++; + return null; + case 1: + count++; + callable.setHasMoreResultsContext(true); + callable.setServerHasMoreResults(false); + return results; + default: + throw new RuntimeException("Expected only 2 invocations"); + } + } + }); + + Mockito.when(rpcFactory. 
newCaller()).thenReturn(caller); + + // Set a small cache + scan.setCaching(1); + // Set a very large size + scan.setMaxResultSize(1000*1000); + + try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf("table"), + clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + + // Due to initializeScannerInConstruction() + Mockito.verify(caller).callWithoutRetries(Mockito.any(RetryingCallable.class), + Mockito.anyInt()); + + InOrder inOrder = Mockito.inOrder(caller); + + scanner.loadCache(); + + // Ensures that possiblyNextScanner isn't called at the end which would trigger + // another call to callWithoutRetries + inOrder.verify(caller, Mockito.times(2)).callWithoutRetries( + Mockito.any(RetryingCallable.class), Mockito.anyInt()); + + assertEquals(3, scanner.cache.size()); + Result r = scanner.cache.poll(); + assertNotNull(r); + CellScanner cs = r.cellScanner(); + assertTrue(cs.advance()); + assertEquals(kv1, cs.current()); + assertFalse(cs.advance()); + + r = scanner.cache.poll(); + assertNotNull(r); + cs = r.cellScanner(); + assertTrue(cs.advance()); + assertEquals(kv2, cs.current()); + assertFalse(cs.advance()); + + r = scanner.cache.poll(); + assertNotNull(r); + cs = r.cellScanner(); + assertTrue(cs.advance()); + assertEquals(kv3, cs.current()); + assertFalse(cs.advance()); + } + } + + @Test + @SuppressWarnings("unchecked") + public void testNoMoreResults() throws IOException { + final Result[] results = new Result[1]; + KeyValue kv1 = new KeyValue("row".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum); + results[0] = Result.create(new Cell[] {kv1}); + + RpcRetryingCaller caller = Mockito.mock(RpcRetryingCaller.class); + + Mockito.when(rpcFactory. newCaller()).thenReturn(caller); + Mockito.when(caller.callWithoutRetries(Mockito.any(RetryingCallable.class), + Mockito.anyInt())).thenAnswer(new Answer() { + private int count = 0; + @Override + public Result[] answer(InvocationOnMock invocation) throws Throwable { + ScannerCallableWithReplicas callable = invocation.getArgumentAt(0, + ScannerCallableWithReplicas.class); + switch (count) { + case 0: // initialize + case 2: // close + count++; + return null; + case 1: + count++; + callable.setHasMoreResultsContext(true); + callable.setServerHasMoreResults(false); + return results; + default: + throw new RuntimeException("Expected only 2 invocations"); + } + } + }); + + Mockito.when(rpcFactory. 
newCaller()).thenReturn(caller); + + // Set a much larger cache and buffer size than we'll provide + scan.setCaching(100); + scan.setMaxResultSize(1000*1000); + + try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf("table"), + clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + + // Due to initializeScannerInConstruction() + Mockito.verify(caller).callWithoutRetries(Mockito.any(RetryingCallable.class), + Mockito.anyInt()); + + scanner.setRpcFinished(true); + + InOrder inOrder = Mockito.inOrder(caller); + + scanner.loadCache(); + + inOrder.verify(caller, Mockito.times(2)).callWithoutRetries( + Mockito.any(RetryingCallable.class), Mockito.anyInt()); + + assertEquals(1, scanner.cache.size()); + Result r = scanner.cache.poll(); + assertNotNull(r); + CellScanner cs = r.cellScanner(); + assertTrue(cs.advance()); + assertEquals(kv1, cs.current()); + assertFalse(cs.advance()); + } + } + + @Test + @SuppressWarnings("unchecked") + public void testMoreResults() throws IOException { + final Result[] results1 = new Result[1]; + KeyValue kv1 = new KeyValue("row".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum); + results1[0] = Result.create(new Cell[] {kv1}); + + final Result[] results2 = new Result[1]; + KeyValue kv2 = new KeyValue("row2".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum); + results2[0] = Result.create(new Cell[] {kv2}); + + + RpcRetryingCaller caller = Mockito.mock(RpcRetryingCaller.class); + + Mockito.when(rpcFactory. newCaller()).thenReturn(caller); + Mockito.when(caller.callWithoutRetries(Mockito.any(RetryingCallable.class), + Mockito.anyInt())).thenAnswer(new Answer() { + private int count = 0; + @Override + public Result[] answer(InvocationOnMock invocation) throws Throwable { + ScannerCallableWithReplicas callable = invocation.getArgumentAt(0, + ScannerCallableWithReplicas.class); + switch (count) { + case 0: // initialize + case 3: // close + count++; + return null; + case 1: + count++; + callable.setHasMoreResultsContext(true); + callable.setServerHasMoreResults(true); + return results1; + case 2: + count++; + // The server reports back false WRT more results + callable.setHasMoreResultsContext(true); + callable.setServerHasMoreResults(false); + return results2; + default: + throw new RuntimeException("Expected only 2 invocations"); + } + } + }); + + // Set a much larger cache and buffer size than we'll provide + scan.setCaching(100); + scan.setMaxResultSize(1000*1000); + + try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf("table"), + clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + + // Due to initializeScannerInConstruction() + Mockito.verify(caller).callWithoutRetries(Mockito.any(RetryingCallable.class), + Mockito.anyInt()); + + InOrder inOrder = Mockito.inOrder(caller); + + scanner.loadCache(); + + inOrder.verify(caller, Mockito.times(2)).callWithoutRetries( + Mockito.any(RetryingCallable.class), Mockito.anyInt()); + + assertEquals(1, scanner.cache.size()); + Result r = scanner.cache.poll(); + assertNotNull(r); + CellScanner cs = r.cellScanner(); + assertTrue(cs.advance()); + assertEquals(kv1, cs.current()); + assertFalse(cs.advance()); + + scanner.setRpcFinished(true); + + inOrder = Mockito.inOrder(caller); + + scanner.loadCache(); + + inOrder.verify(caller, Mockito.times(3)).callWithoutRetries( + Mockito.any(RetryingCallable.class), Mockito.anyInt()); + + r = scanner.cache.poll(); + assertNotNull(r); + cs = r.cellScanner(); + 
assertTrue(cs.advance()); + assertEquals(kv2, cs.current()); + assertFalse(cs.advance()); + } + } +} diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientSmallReversedScanner.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientSmallReversedScanner.java new file mode 100644 index 00000000000..4611d08dfe1 --- /dev/null +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientSmallReversedScanner.java @@ -0,0 +1,349 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValue.Type; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ClientSmallScanner.SmallScannerCallableFactory; +import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +/** + * Test the ClientSmallReversedScanner. + */ +@Category(SmallTests.class) +public class TestClientSmallReversedScanner { + + Scan scan; + ExecutorService pool; + Configuration conf; + + ClusterConnection clusterConn; + RpcRetryingCallerFactory rpcFactory; + RpcControllerFactory controllerFactory; + RpcRetryingCaller caller; + + @Before + @SuppressWarnings({"deprecation", "unchecked"}) + public void setup() throws IOException { + clusterConn = Mockito.mock(ClusterConnection.class); + rpcFactory = Mockito.mock(RpcRetryingCallerFactory.class); + controllerFactory = Mockito.mock(RpcControllerFactory.class); + pool = Executors.newSingleThreadExecutor(); + scan = new Scan(); + conf = new Configuration(); + Mockito.when(clusterConn.getConfiguration()).thenReturn(conf); + // Mock out the RpcCaller + caller = Mockito.mock(RpcRetryingCaller.class); + // Return the mock from the factory + Mockito.when(rpcFactory. 
newCaller()).thenReturn(caller); + } + + @After + public void teardown() { + if (null != pool) { + pool.shutdownNow(); + } + } + + /** + * Create a simple Answer which returns true the first time, and false every time after. + */ + private Answer createTrueThenFalseAnswer() { + return new Answer() { + boolean first = true; + + @Override + public Boolean answer(InvocationOnMock invocation) { + if (first) { + first = false; + return true; + } + return false; + } + }; + } + + private SmallScannerCallableFactory getFactory( + final ScannerCallableWithReplicas callableWithReplicas) { + return new SmallScannerCallableFactory() { + @Override + public ScannerCallableWithReplicas getCallable(ClusterConnection connection, TableName table, + Scan scan, ScanMetrics scanMetrics, byte[] localStartKey, int cacheNum, + RpcControllerFactory controllerFactory, ExecutorService pool, + int primaryOperationTimeout, int retries, int scannerTimeout, Configuration conf, + RpcRetryingCaller caller) { + return callableWithReplicas; + } + }; + } + + @Test + public void testContextPresent() throws Exception { + final KeyValue kv1 = new KeyValue("row1".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum), kv2 = new KeyValue("row2".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum), kv3 = new KeyValue("row3".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum); + + ScannerCallableWithReplicas callableWithReplicas = Mockito + .mock(ScannerCallableWithReplicas.class); + + // Mock out the RpcCaller + @SuppressWarnings("unchecked") + RpcRetryingCaller caller = Mockito.mock(RpcRetryingCaller.class); + // Return the mock from the factory + Mockito.when(rpcFactory. newCaller()).thenReturn(caller); + + // Intentionally leave a "default" caching size in the Scan. No matter the value, we + // should continue based on the server context + + SmallScannerCallableFactory factory = getFactory(callableWithReplicas); + + try (ClientSmallReversedScanner csrs = new ClientSmallReversedScanner(conf, scan, + TableName.valueOf("table"), clusterConn, rpcFactory, controllerFactory, pool, + Integer.MAX_VALUE)) { + + csrs.setScannerCallableFactory(factory); + + // Return some data the first time, less the second, and none after that + Mockito.when(caller.callWithoutRetries(callableWithReplicas, csrs.getScannerTimeout())) + .thenAnswer(new Answer() { + int count = 0; + + @Override + public Result[] answer(InvocationOnMock invocation) { + Result[] results; + if (0 == count) { + results = new Result[] {Result.create(new Cell[] {kv3}), + Result.create(new Cell[] {kv2})}; + } else if (1 == count) { + results = new Result[] {Result.create(new Cell[] {kv1})}; + } else { + results = new Result[0]; + } + count++; + return results; + } + }); + + // Pass back the context always + Mockito.when(callableWithReplicas.hasMoreResultsContext()).thenReturn(true); + // Only have more results the first time + Mockito.when(callableWithReplicas.getServerHasMoreResults()).thenAnswer( + createTrueThenFalseAnswer()); + + // A mocked HRegionInfo so ClientSmallScanner#nextScanner(...) works right + HRegionInfo regionInfo = Mockito.mock(HRegionInfo.class); + Mockito.when(callableWithReplicas.getHRegionInfo()).thenReturn(regionInfo); + // Trigger the "no more data" branch for #nextScanner(...) 
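These scanner tests all script the open / next / close RPC sequence by stubbing callWithoutRetries with a stateful Mockito Answer that returns a different batch on each invocation. A minimal, self-contained sketch of that idiom follows; the Callable stand-in and the scripted values are illustrative only (not part of this patch), and it assumes mockito-core on the classpath.

import java.util.concurrent.Callable;

import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class CountingAnswerSketch {
  public static void main(String[] args) throws Exception {
    @SuppressWarnings("unchecked")
    Callable<String[]> rpc = Mockito.mock(Callable.class);

    // Script three consecutive invocations: "open" and "close" return nothing,
    // the middle call returns the single data batch.
    Mockito.when(rpc.call()).thenAnswer(new Answer<String[]>() {
      private int count = 0;

      @Override
      public String[] answer(InvocationOnMock invocation) {
        switch (count++) {
        case 0: // open
        case 2: // close
          return null;
        case 1: // the one batch of rows
          return new String[] {"row1"};
        default:
          throw new RuntimeException("Scripted for only 3 invocations");
        }
      }
    });

    System.out.println(rpc.call());        // null  (open)
    System.out.println(rpc.call()[0]);     // row1  (next)
    System.out.println(rpc.call());        // null  (close)
  }
}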
+ Mockito.when(regionInfo.getEndKey()).thenReturn(HConstants.EMPTY_BYTE_ARRAY); + + csrs.loadCache(); + + List results = csrs.cache; + Iterator iter = results.iterator(); + assertEquals(3, results.size()); + for (int i = 3; i >= 1 && iter.hasNext(); i--) { + Result result = iter.next(); + byte[] row = result.getRow(); + assertEquals("row" + i, new String(row, StandardCharsets.UTF_8)); + assertEquals(1, result.getMap().size()); + } + assertTrue(csrs.closed); + } + } + + @Test + public void testNoContextFewerRecords() throws Exception { + final KeyValue kv1 = new KeyValue("row1".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum), kv2 = new KeyValue("row2".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum), kv3 = new KeyValue("row3".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum); + + ScannerCallableWithReplicas callableWithReplicas = Mockito + .mock(ScannerCallableWithReplicas.class); + + // While the server returns 2 records per batch, we expect more records. + scan.setCaching(2); + + SmallScannerCallableFactory factory = getFactory(callableWithReplicas); + + try (ClientSmallReversedScanner csrs = new ClientSmallReversedScanner(conf, scan, + TableName.valueOf("table"), clusterConn, rpcFactory, controllerFactory, pool, + Integer.MAX_VALUE)) { + + csrs.setScannerCallableFactory(factory); + + // Return some data the first time, less the second, and none after that + Mockito.when(caller.callWithoutRetries(callableWithReplicas, csrs.getScannerTimeout())) + .thenAnswer(new Answer() { + int count = 0; + + @Override + public Result[] answer(InvocationOnMock invocation) { + Result[] results; + if (0 == count) { + results = new Result[] {Result.create(new Cell[] {kv3}), + Result.create(new Cell[] {kv2})}; + } else if (1 == count) { + // Return fewer records than expected (2) + results = new Result[] {Result.create(new Cell[] {kv1})}; + } else { + throw new RuntimeException("Should not fetch a third batch from the server"); + } + count++; + return results; + } + }); + + // Server doesn't return the context + Mockito.when(callableWithReplicas.hasMoreResultsContext()).thenReturn(false); + // getServerHasMoreResults shouldn't be called when hasMoreResultsContext returns false + Mockito.when(callableWithReplicas.getServerHasMoreResults()) + .thenThrow(new RuntimeException("Should not be called")); + + // A mocked HRegionInfo so ClientSmallScanner#nextScanner(...) works right + HRegionInfo regionInfo = Mockito.mock(HRegionInfo.class); + Mockito.when(callableWithReplicas.getHRegionInfo()).thenReturn(regionInfo); + // Trigger the "no more data" branch for #nextScanner(...) 
+ Mockito.when(regionInfo.getEndKey()).thenReturn(HConstants.EMPTY_BYTE_ARRAY); + + csrs.loadCache(); + + List results = csrs.cache; + Iterator iter = results.iterator(); + assertEquals(2, results.size()); + for (int i = 3; i >= 2 && iter.hasNext(); i--) { + Result result = iter.next(); + byte[] row = result.getRow(); + assertEquals("row" + i, new String(row, StandardCharsets.UTF_8)); + assertEquals(1, result.getMap().size()); + } + + // "consume" the Results + results.clear(); + + csrs.loadCache(); + + assertEquals(1, results.size()); + Result result = results.get(0); + assertEquals("row1", new String(result.getRow(), StandardCharsets.UTF_8)); + assertEquals(1, result.getMap().size()); + + assertTrue(csrs.closed); + } + } + + @Test + public void testNoContextNoRecords() throws Exception { + ScannerCallableWithReplicas callableWithReplicas = Mockito + .mock(ScannerCallableWithReplicas.class); + + // While the server return 2 records per RPC, we expect there to be more records. + scan.setCaching(2); + + SmallScannerCallableFactory factory = getFactory(callableWithReplicas); + + try (ClientSmallReversedScanner csrs = new ClientSmallReversedScanner(conf, scan, + TableName.valueOf("table"), clusterConn, rpcFactory, controllerFactory, pool, + Integer.MAX_VALUE)) { + + csrs.setScannerCallableFactory(factory); + + // Return some data the first time, less the second, and none after that + Mockito.when(caller.callWithoutRetries(callableWithReplicas, csrs.getScannerTimeout())) + .thenReturn(new Result[0]); + + // Server doesn't return the context + Mockito.when(callableWithReplicas.hasMoreResultsContext()).thenReturn(false); + // Only have more results the first time + Mockito.when(callableWithReplicas.getServerHasMoreResults()) + .thenThrow(new RuntimeException("Should not be called")); + + // A mocked HRegionInfo so ClientSmallScanner#nextScanner(...) works right + HRegionInfo regionInfo = Mockito.mock(HRegionInfo.class); + Mockito.when(callableWithReplicas.getHRegionInfo()).thenReturn(regionInfo); + // Trigger the "no more data" branch for #nextScanner(...) + Mockito.when(regionInfo.getEndKey()).thenReturn(HConstants.EMPTY_BYTE_ARRAY); + + csrs.loadCache(); + + assertEquals(0, csrs.cache.size()); + assertTrue(csrs.closed); + } + } + + @Test + public void testContextNoRecords() throws Exception { + ScannerCallableWithReplicas callableWithReplicas = Mockito + .mock(ScannerCallableWithReplicas.class); + + SmallScannerCallableFactory factory = getFactory(callableWithReplicas); + + try (ClientSmallReversedScanner csrs = new ClientSmallReversedScanner(conf, scan, + TableName.valueOf("table"), clusterConn, rpcFactory, controllerFactory, pool, + Integer.MAX_VALUE)) { + + csrs.setScannerCallableFactory(factory); + + // Return some data the first time, less the second, and none after that + Mockito.when(caller.callWithoutRetries(callableWithReplicas, csrs.getScannerTimeout())) + .thenReturn(new Result[0]); + + // Server doesn't return the context + Mockito.when(callableWithReplicas.hasMoreResultsContext()).thenReturn(true); + // Only have more results the first time + Mockito.when(callableWithReplicas.getServerHasMoreResults()) + .thenReturn(false); + + // A mocked HRegionInfo so ClientSmallScanner#nextScanner(...) works right + HRegionInfo regionInfo = Mockito.mock(HRegionInfo.class); + Mockito.when(callableWithReplicas.getHRegionInfo()).thenReturn(regionInfo); + // Trigger the "no more data" branch for #nextScanner(...) 
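Taken together, the tests in this file pin down a simple continue-or-stop rule for small scans: when the server supplies the more-results context, its flag is authoritative; without it, the client keeps fetching only while full batches (relative to the scan caching) come back. The helper below is a rough restatement of that rule for illustration only; it is not the ClientSmallReversedScanner implementation.

public class SmallScanContinueSketch {

  /**
   * Decide whether to ask the server for another small-scan batch.
   * Mirrors only what the tests assert, not the real client internals.
   */
  static boolean fetchAnotherBatch(boolean hasMoreResultsContext,
      boolean serverHasMoreResults, int lastBatchSize, int caching) {
    if (hasMoreResultsContext) {
      // Newer servers say explicitly whether anything is left.
      return serverHasMoreResults;
    }
    // Older servers: a short (or empty) batch is taken to mean "done".
    return lastBatchSize >= caching;
  }

  public static void main(String[] args) {
    // Context present, server says done -> stop (testContextNoRecords).
    System.out.println(fetchAnotherBatch(true, false, 0, 2));   // false
    // No context, a full batch of 2 with caching=2 -> keep fetching.
    System.out.println(fetchAnotherBatch(false, false, 2, 2));  // true
    // No context, short batch -> stop (testNoContextFewerRecords).
    System.out.println(fetchAnotherBatch(false, false, 1, 2));  // false
  }
}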
+ Mockito.when(regionInfo.getEndKey()).thenReturn(HConstants.EMPTY_BYTE_ARRAY); + + csrs.loadCache(); + + assertEquals(0, csrs.cache.size()); + assertTrue(csrs.closed); + } + } +} diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientSmallScanner.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientSmallScanner.java new file mode 100644 index 00000000000..90bf4bbd3ab --- /dev/null +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientSmallScanner.java @@ -0,0 +1,339 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValue.Type; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ClientSmallScanner.SmallScannerCallableFactory; +import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +/** + * Test the ClientSmallScanner. + */ +@Category(SmallTests.class) +public class TestClientSmallScanner { + + Scan scan; + ExecutorService pool; + Configuration conf; + + ClusterConnection clusterConn; + RpcRetryingCallerFactory rpcFactory; + RpcControllerFactory controllerFactory; + RpcRetryingCaller caller; + + @Before + @SuppressWarnings({"deprecation", "unchecked"}) + public void setup() throws IOException { + clusterConn = Mockito.mock(ClusterConnection.class); + rpcFactory = Mockito.mock(RpcRetryingCallerFactory.class); + controllerFactory = Mockito.mock(RpcControllerFactory.class); + pool = Executors.newSingleThreadExecutor(); + scan = new Scan(); + conf = new Configuration(); + Mockito.when(clusterConn.getConfiguration()).thenReturn(conf); + // Mock out the RpcCaller + caller = Mockito.mock(RpcRetryingCaller.class); + // Return the mock from the factory + Mockito.when(rpcFactory. 
newCaller()).thenReturn(caller); + } + + @After + public void teardown() { + if (null != pool) { + pool.shutdownNow(); + } + } + + /** + * Create a simple Answer which returns true the first time, and false every time after. + */ + private Answer createTrueThenFalseAnswer() { + return new Answer() { + boolean first = true; + + @Override + public Boolean answer(InvocationOnMock invocation) { + if (first) { + first = false; + return true; + } + return false; + } + }; + } + + private SmallScannerCallableFactory getFactory( + final ScannerCallableWithReplicas callableWithReplicas) { + return new SmallScannerCallableFactory() { + @Override + public ScannerCallableWithReplicas getCallable(ClusterConnection connection, TableName table, + Scan scan, ScanMetrics scanMetrics, byte[] localStartKey, int cacheNum, + RpcControllerFactory controllerFactory, ExecutorService pool, + int primaryOperationTimeout, int retries, int scannerTimeout, Configuration conf, + RpcRetryingCaller caller) { + return callableWithReplicas; + } + }; + } + + @Test + public void testContextPresent() throws Exception { + final KeyValue kv1 = new KeyValue("row1".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum), kv2 = new KeyValue("row2".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum), kv3 = new KeyValue("row3".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum); + + ScannerCallableWithReplicas callableWithReplicas = Mockito + .mock(ScannerCallableWithReplicas.class); + + // Mock out the RpcCaller + @SuppressWarnings("unchecked") + RpcRetryingCaller caller = Mockito.mock(RpcRetryingCaller.class); + // Return the mock from the factory + Mockito.when(rpcFactory. newCaller()).thenReturn(caller); + + SmallScannerCallableFactory factory = getFactory(callableWithReplicas); + + // Intentionally leave a "default" caching size in the Scan. No matter the value, we + // should continue based on the server context + + try (ClientSmallScanner css = new ClientSmallScanner(conf, scan, TableName.valueOf("table"), + clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + + css.setScannerCallableFactory(factory); + + // Return some data the first time, less the second, and none after that + Mockito.when(caller.callWithoutRetries(callableWithReplicas, css.getScannerTimeout())) + .thenAnswer(new Answer() { + int count = 0; + + @Override + public Result[] answer(InvocationOnMock invocation) { + Result[] results; + if (0 == count) { + results = new Result[] {Result.create(new Cell[] {kv1}), + Result.create(new Cell[] {kv2})}; + } else if (1 == count) { + results = new Result[] {Result.create(new Cell[] {kv3})}; + } else { + results = new Result[0]; + } + count++; + return results; + } + }); + + // Pass back the context always + Mockito.when(callableWithReplicas.hasMoreResultsContext()).thenReturn(true); + // Only have more results the first time + Mockito.when(callableWithReplicas.getServerHasMoreResults()).thenAnswer( + createTrueThenFalseAnswer()); + + // A mocked HRegionInfo so ClientSmallScanner#nextScanner(...) works right + HRegionInfo regionInfo = Mockito.mock(HRegionInfo.class); + Mockito.when(callableWithReplicas.getHRegionInfo()).thenReturn(regionInfo); + // Trigger the "no more data" branch for #nextScanner(...) 
+ Mockito.when(regionInfo.getEndKey()).thenReturn(HConstants.EMPTY_BYTE_ARRAY); + + css.loadCache(); + + List results = css.cache; + assertEquals(3, results.size()); + for (int i = 1; i <= 3; i++) { + Result result = results.get(i - 1); + byte[] row = result.getRow(); + assertEquals("row" + i, new String(row, StandardCharsets.UTF_8)); + assertEquals(1, result.getMap().size()); + } + + assertTrue(css.closed); + } + } + + @Test + public void testNoContextFewerRecords() throws Exception { + final KeyValue kv1 = new KeyValue("row1".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum), kv2 = new KeyValue("row2".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum), kv3 = new KeyValue("row3".getBytes(), "cf".getBytes(), "cq".getBytes(), 1, + Type.Maximum); + + ScannerCallableWithReplicas callableWithReplicas = Mockito + .mock(ScannerCallableWithReplicas.class); + + // While the server returns 2 records per batch, we expect more records. + scan.setCaching(2); + SmallScannerCallableFactory factory = getFactory(callableWithReplicas); + + try (ClientSmallScanner css = new ClientSmallScanner(conf, scan, TableName.valueOf("table"), + clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + + css.setScannerCallableFactory(factory); + // Return some data the first time, less the second, and none after that + Mockito.when(caller.callWithoutRetries(callableWithReplicas, css.getScannerTimeout())) + .thenAnswer(new Answer() { + int count = 0; + + @Override + public Result[] answer(InvocationOnMock invocation) { + Result[] results; + if (0 == count) { + results = new Result[] {Result.create(new Cell[] {kv1}), + Result.create(new Cell[] {kv2})}; + } else if (1 == count) { + // Return fewer records than expected (2) + results = new Result[] {Result.create(new Cell[] {kv3})}; + } else { + throw new RuntimeException("Should not fetch a third batch from the server"); + } + count++; + return results; + } + }); + + // Server doesn't return the context + Mockito.when(callableWithReplicas.hasMoreResultsContext()).thenReturn(false); + // Only have more results the first time + Mockito.when(callableWithReplicas.getServerHasMoreResults()).thenThrow( + new RuntimeException("Should not be called")); + + // A mocked HRegionInfo so ClientSmallScanner#nextScanner(...) works right + HRegionInfo regionInfo = Mockito.mock(HRegionInfo.class); + Mockito.when(callableWithReplicas.getHRegionInfo()).thenReturn(regionInfo); + // Trigger the "no more data" branch for #nextScanner(...) + Mockito.when(regionInfo.getEndKey()).thenReturn(HConstants.EMPTY_BYTE_ARRAY); + + css.loadCache(); + + List results = css.cache; + assertEquals(2, results.size()); + for (int i = 1; i <= 2; i++) { + Result result = results.get(i - 1); + byte[] row = result.getRow(); + assertEquals("row" + i, new String(row, StandardCharsets.UTF_8)); + assertEquals(1, result.getMap().size()); + } + + // "consume" the results we verified + results.clear(); + + css.loadCache(); + + assertEquals(1, results.size()); + Result result = results.get(0); + assertEquals("row3", new String(result.getRow(), StandardCharsets.UTF_8)); + assertEquals(1, result.getMap().size()); + assertTrue(css.closed); + } + } + + @Test + public void testNoContextNoRecords() throws Exception { + ScannerCallableWithReplicas callableWithReplicas = Mockito + .mock(ScannerCallableWithReplicas.class); + + // While the server return 2 records per RPC, we expect there to be more records. 
+ scan.setCaching(2); + + SmallScannerCallableFactory factory = getFactory(callableWithReplicas); + + try (ClientSmallScanner css = new ClientSmallScanner(conf, scan, TableName.valueOf("table"), + clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + + css.setScannerCallableFactory(factory); + + // Return some data the first time, less the second, and none after that + Mockito.when(caller.callWithoutRetries(callableWithReplicas, css.getScannerTimeout())) + .thenReturn(new Result[0]); + + // Server doesn't return the context + Mockito.when(callableWithReplicas.hasMoreResultsContext()).thenReturn(false); + // Only have more results the first time + Mockito.when(callableWithReplicas.getServerHasMoreResults()).thenThrow( + new RuntimeException("Should not be called")); + + // A mocked HRegionInfo so ClientSmallScanner#nextScanner(...) works right + HRegionInfo regionInfo = Mockito.mock(HRegionInfo.class); + Mockito.when(callableWithReplicas.getHRegionInfo()).thenReturn(regionInfo); + // Trigger the "no more data" branch for #nextScanner(...) + Mockito.when(regionInfo.getEndKey()).thenReturn(HConstants.EMPTY_BYTE_ARRAY); + + css.loadCache(); + + assertEquals(0, css.cache.size()); + assertTrue(css.closed); + } + } + + @Test + public void testContextNoRecords() throws Exception { + ScannerCallableWithReplicas callableWithReplicas = Mockito + .mock(ScannerCallableWithReplicas.class); + + SmallScannerCallableFactory factory = getFactory(callableWithReplicas); + + try (ClientSmallScanner css = new ClientSmallScanner(conf, scan, TableName.valueOf("table"), + clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + + css.setScannerCallableFactory(factory); + + // Return some data the first time, less the second, and none after that + Mockito.when(caller.callWithoutRetries(callableWithReplicas, css.getScannerTimeout())) + .thenReturn(new Result[0]); + + // Server doesn't return the context + Mockito.when(callableWithReplicas.hasMoreResultsContext()).thenReturn(true); + // Only have more results the first time + Mockito.when(callableWithReplicas.getServerHasMoreResults()).thenReturn(false); + + // A mocked HRegionInfo so ClientSmallScanner#nextScanner(...) works right + HRegionInfo regionInfo = Mockito.mock(HRegionInfo.class); + Mockito.when(callableWithReplicas.getHRegionInfo()).thenReturn(regionInfo); + // Trigger the "no more data" branch for #nextScanner(...) 
+ Mockito.when(regionInfo.getEndKey()).thenReturn(HConstants.EMPTY_BYTE_ARRAY); + + css.loadCache(); + + assertEquals(0, css.cache.size()); + assertTrue(css.closed); + } + } +} diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java index 23e538c6893..fd1ba50d8ca 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java @@ -155,6 +155,13 @@ public class TestGet { Set qualifiers = get.getFamilyMap().get(family); Assert.assertEquals(1, qualifiers.size()); } + + @Test + public void TestGetRowFromGetCopyConstructor() throws Exception { + Get get = new Get(ROW); + Get copyGet = new Get(get); + assertEquals(0, Bytes.compareTo(get.getRow(), copyGet.getRow())); + } @Test public void testDynamicFilter() throws Exception { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java new file mode 100644 index 00000000000..da3ffe97c6e --- /dev/null +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java @@ -0,0 +1,186 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.client; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse; + +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import org.mockito.Mockito; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({ClientTests.class, SmallTests.class}) +public class TestProcedureFuture { + private static class TestFuture extends HBaseAdmin.ProcedureFuture { + private boolean postOperationResultCalled = false; + private boolean waitOperationResultCalled = false; + private boolean getProcedureResultCalled = false; + private boolean convertResultCalled = false; + + public TestFuture(final HBaseAdmin admin, final Long procId) { + super(admin, procId); + } + + public boolean wasPostOperationResultCalled() { + return postOperationResultCalled; + } + + public boolean wasWaitOperationResultCalled() { + return waitOperationResultCalled; + } + + public boolean wasGetProcedureResultCalled() { + return getProcedureResultCalled; + } + + public boolean wasConvertResultCalled() { + return convertResultCalled; + } + + @Override + protected GetProcedureResultResponse getProcedureResult( + final GetProcedureResultRequest request) throws IOException { + getProcedureResultCalled = true; + return GetProcedureResultResponse.newBuilder() + .setState(GetProcedureResultResponse.State.FINISHED) + .build(); + } + + @Override + protected Void convertResult(final GetProcedureResultResponse response) throws IOException { + convertResultCalled = true; + return null; + } + + @Override + protected Void waitOperationResult(final long deadlineTs) + throws IOException, TimeoutException { + waitOperationResultCalled = true; + return null; + } + + @Override + protected Void postOperationResult(final Void result, final long deadlineTs) + throws IOException, TimeoutException { + postOperationResultCalled = true; + return result; + } + } + + /** + * When a master return a result with procId, + * we are skipping the waitOperationResult() call, + * since we are getting the procedure result. + */ + @Test(timeout=60000) + public void testWithProcId() throws Exception { + HBaseAdmin admin = Mockito.mock(HBaseAdmin.class); + TestFuture f = new TestFuture(admin, 100L); + f.get(1, TimeUnit.MINUTES); + + assertTrue("expected getProcedureResult() to be called", f.wasGetProcedureResultCalled()); + assertTrue("expected convertResult() to be called", f.wasConvertResultCalled()); + assertFalse("unexpected waitOperationResult() called", f.wasWaitOperationResultCalled()); + assertTrue("expected postOperationResult() to be called", f.wasPostOperationResultCalled()); + } + + /** + * Verify that the spin loop for the procedure running works. 
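The spinning test below repeatedly asks a master stub for the procedure state until it reports FINISHED. A generic sketch of that poll-until-finished loop, using stand-in types rather than the MasterProtos classes:

import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;

public class PollUntilFinishedSketch {

  /** Stand-in for GetProcedureResultResponse.State. */
  enum State { RUNNING, FINISHED }

  /** Stand-in for the getProcedureResult() round trip to the master. */
  interface ProcedureProbe {
    State state();
  }

  /** Poll until the procedure reports FINISHED or the deadline passes. */
  static void waitUntilFinished(ProcedureProbe probe, long timeoutMs, long pollMs)
      throws TimeoutException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (probe.state() != State.FINISHED) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("procedure still running after " + timeoutMs + "ms");
      }
      Thread.sleep(pollMs);
    }
  }

  public static void main(String[] args) throws Exception {
    final AtomicInteger spins = new AtomicInteger(0);
    // Report RUNNING for the first nine polls and FINISHED on the tenth,
    // mirroring the spinCount logic in the test below.
    ProcedureProbe probe = new ProcedureProbe() {
      @Override
      public State state() {
        return spins.incrementAndGet() >= 10 ? State.FINISHED : State.RUNNING;
      }
    };
    waitUntilFinished(probe, 60000, 10);
    System.out.println("finished after " + spins.get() + " polls");
  }
}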
+ */ + @Test(timeout=60000) + public void testWithProcIdAndSpinning() throws Exception { + final AtomicInteger spinCount = new AtomicInteger(0); + HBaseAdmin admin = Mockito.mock(HBaseAdmin.class); + TestFuture f = new TestFuture(admin, 100L) { + @Override + protected GetProcedureResultResponse getProcedureResult( + final GetProcedureResultRequest request) throws IOException { + boolean done = spinCount.incrementAndGet() >= 10; + return GetProcedureResultResponse.newBuilder() + .setState(done ? GetProcedureResultResponse.State.FINISHED : + GetProcedureResultResponse.State.RUNNING) + .build(); + } + }; + f.get(1, TimeUnit.MINUTES); + + assertEquals(10, spinCount.get()); + assertTrue("expected convertResult() to be called", f.wasConvertResultCalled()); + assertFalse("unexpected waitOperationResult() called", f.wasWaitOperationResultCalled()); + assertTrue("expected postOperationResult() to be called", f.wasPostOperationResultCalled()); + } + + /** + * When a master return a result without procId, + * we are skipping the getProcedureResult() call. + */ + @Test(timeout=60000) + public void testWithoutProcId() throws Exception { + HBaseAdmin admin = Mockito.mock(HBaseAdmin.class); + TestFuture f = new TestFuture(admin, null); + f.get(1, TimeUnit.MINUTES); + + assertFalse("unexpected getProcedureResult() called", f.wasGetProcedureResultCalled()); + assertFalse("unexpected convertResult() called", f.wasConvertResultCalled()); + assertTrue("expected waitOperationResult() to be called", f.wasWaitOperationResultCalled()); + assertTrue("expected postOperationResult() to be called", f.wasPostOperationResultCalled()); + } + + /** + * When a new client with procedure support tries to ask an old-master without proc-support + * the procedure result we get a DoNotRetryIOException (which is an UnsupportedOperationException) + * The future should trap that and fallback to the waitOperationResult(). + * + * This happens when the operation calls happens on a "new master" but while we are waiting + * the operation to be completed, we failover on an "old master". 
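The test that follows asserts exactly this trap-and-fall-back behaviour: a DoNotRetryIOException wrapping an UnsupportedOperationException means the master has no procedure support, so the future falls back to the legacy wait. A compact sketch of the pattern, using stand-in types instead of the HBase classes:

import java.io.IOException;

public class ProcedureFallbackSketch {

  /** Stand-in for HBase's DoNotRetryIOException. */
  static class DoNotRetryIOException extends IOException {
    DoNotRetryIOException(Throwable cause) { super(cause); }
  }

  interface ResultFetcher {
    /** Stand-in for getProcedureResult() followed by convertResult(). */
    String fetchViaProcedure() throws IOException;
    /** Stand-in for waitOperationResult(): the pre-procedure way of waiting. */
    String waitTheOldWay();
  }

  /** Prefer the procedure result, but fall back when the master cannot supply one. */
  static String fetch(ResultFetcher fetcher) throws IOException {
    try {
      return fetcher.fetchViaProcedure();
    } catch (DoNotRetryIOException e) {
      if (e.getCause() instanceof UnsupportedOperationException) {
        // Old master: no procedure support, wait for the operation the old way.
        return fetcher.waitTheOldWay();
      }
      throw e;
    }
  }

  public static void main(String[] args) throws IOException {
    String result = fetch(new ResultFetcher() {
      @Override public String fetchViaProcedure() throws IOException {
        throw new DoNotRetryIOException(new UnsupportedOperationException("getProcedureResult"));
      }
      @Override public String waitTheOldWay() { return "done (legacy wait)"; }
    });
    System.out.println(result);   // done (legacy wait)
  }
}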
+ */ + @Test(timeout=60000) + public void testOnServerWithNoProcedureSupport() throws Exception { + HBaseAdmin admin = Mockito.mock(HBaseAdmin.class); + TestFuture f = new TestFuture(admin, 100L) { + @Override + protected GetProcedureResultResponse getProcedureResult( + final GetProcedureResultRequest request) throws IOException { + super.getProcedureResult(request); + throw new DoNotRetryIOException(new UnsupportedOperationException("getProcedureResult")); + } + }; + f.get(1, TimeUnit.MINUTES); + + assertTrue("expected getProcedureResult() to be called", f.wasGetProcedureResultCalled()); + assertFalse("unexpected convertResult() called", f.wasConvertResultCalled()); + assertTrue("expected waitOperationResult() to be called", f.wasWaitOperationResultCalled()); + assertTrue("expected postOperationResult() to be called", f.wasPostOperationResultCalled()); + } +} \ No newline at end of file diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java index 78d718e4142..1b039bd241d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java @@ -72,8 +72,8 @@ public class TestSnapshotFromAdmin { + "- further testing won't prove anything.", time < ignoreExpectedTime); // setup the mocks - ConnectionManager.HConnectionImplementation mockConnection = Mockito - .mock(ConnectionManager.HConnectionImplementation.class); + ConnectionImplementation mockConnection = Mockito + .mock(ConnectionImplementation.class); Configuration conf = HBaseConfiguration.create(); // setup the conf to match the expected properties conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, numRetries); @@ -119,8 +119,8 @@ public class TestSnapshotFromAdmin { */ @Test public void testValidateSnapshotName() throws Exception { - ConnectionManager.HConnectionImplementation mockConnection = Mockito - .mock(ConnectionManager.HConnectionImplementation.class); + ConnectionImplementation mockConnection = Mockito + .mock(ConnectionImplementation.class); Configuration conf = HBaseConfiguration.create(); Mockito.when(mockConnection.getConfiguration()).thenReturn(conf); Admin admin = new HBaseAdmin(mockConnection); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java index b003d5c755c..3c31c037cc4 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java @@ -17,10 +17,14 @@ */ package org.apache.hadoop.hbase.filter; +import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Test; +import org.junit.experimental.categories.Category; + import static org.junit.Assert.assertEquals; +@Category(SmallTests.class) public class TestLongComparator { private long values[] = { Long.MIN_VALUE, -10000000000L, -1000000L, 0L, 1000000L, 10000000000L, Long.MAX_VALUE }; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java index ed6f49b7dc3..b0e3464f4b2 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java +++ 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java @@ -49,7 +49,9 @@ public class TestEncryptionUtil { // generate a test key byte[] keyBytes = new byte[AES.KEY_LENGTH]; new SecureRandom().nextBytes(keyBytes); - Key key = new SecretKeySpec(keyBytes, "AES"); + String algorithm = + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + Key key = new SecretKeySpec(keyBytes, algorithm); // wrap the test key byte[] wrappedKeyBytes = EncryptionUtil.wrapKey(conf, "hbase", key); @@ -73,4 +75,49 @@ public class TestEncryptionUtil { } } + @Test + public void testWALKeyWrapping() throws Exception { + // set up the key provider for testing to resolve a key for our test subject + Configuration conf = new Configuration(); // we don't need HBaseConfiguration for this + conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); + + // generate a test key + byte[] keyBytes = new byte[AES.KEY_LENGTH]; + new SecureRandom().nextBytes(keyBytes); + String algorithm = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + Key key = new SecretKeySpec(keyBytes, algorithm); + + // wrap the test key + byte[] wrappedKeyBytes = EncryptionUtil.wrapKey(conf, "hbase", key); + assertNotNull(wrappedKeyBytes); + + // unwrap + Key unwrappedKey = EncryptionUtil.unwrapWALKey(conf, "hbase", wrappedKeyBytes); + assertNotNull(unwrappedKey); + // only secretkeyspec supported for now + assertTrue(unwrappedKey instanceof SecretKeySpec); + // did we get back what we wrapped? + assertTrue("Unwrapped key bytes do not match original", + Bytes.equals(keyBytes, unwrappedKey.getEncoded())); + } + + @Test(expected = KeyException.class) + public void testWALKeyWrappingWithIncorrectKey() throws Exception { + // set up the key provider for testing to resolve a key for our test subject + Configuration conf = new Configuration(); // we don't need HBaseConfiguration for this + conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); + + // generate a test key + byte[] keyBytes = new byte[AES.KEY_LENGTH]; + new SecureRandom().nextBytes(keyBytes); + String algorithm = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + Key key = new SecretKeySpec(keyBytes, algorithm); + + // wrap the test key + byte[] wrappedKeyBytes = EncryptionUtil.wrapKey(conf, "hbase", key); + assertNotNull(wrappedKeyBytes); + + // unwrap with an incorrect key + EncryptionUtil.unwrapWALKey(conf, "other", wrappedKeyBytes); + } } diff --git a/hbase-client/src/test/resources/log4j.properties b/hbase-client/src/test/resources/log4j.properties index 6ee91efc3b2..13a95b4a673 100644 --- a/hbase-client/src/test/resources/log4j.properties +++ b/hbase-client/src/test/resources/log4j.properties @@ -63,4 +63,4 @@ log4j.logger.org.apache.hadoop.hbase=DEBUG log4j.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=ERROR log4j.org.apache.hadoop.metrics2.util.MBeans=ERROR # Enable this to get detailed connection error/retry logging. 
-# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE +# log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=TRACE diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml index c95aaf7243a..2f5e7f2f611 100644 --- a/hbase-common/pom.xml +++ b/hbase-common/pom.xml @@ -41,25 +41,6 @@ - - maven-compiler-plugin - - - default-compile - - ${java.default.compiler} - true - - - - default-testCompile - - ${java.default.compiler} - true - - - - org.apache.maven.plugins maven-site-plugin @@ -165,8 +146,8 @@ - + org.eclipse.m2e lifecycle-mapping @@ -187,6 +168,32 @@ + + + org.apache.maven.plugins + maven-dependency-plugin + [2.8,) + + build-classpath + + + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + [3.2,) + + compile + + + + + + diff --git a/hbase-common/src/main/asciidoc/.gitignore b/hbase-common/src/main/asciidoc/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java index f59793530fa..4754ea4c2f5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java @@ -59,10 +59,10 @@ public class AuthUtil { conf.get("hbase.client.dns.nameserver", "default"))); userProvider.login("hbase.client.keytab.file", "hbase.client.kerberos.principal", host); } catch (UnknownHostException e) { - LOG.error("Error resolving host name"); + LOG.error("Error resolving host name: " + e.getMessage(), e); throw e; } catch (IOException e) { - LOG.error("Error while trying to perform the initial login"); + LOG.error("Error while trying to perform the initial login: " + e.getMessage(), e); throw e; } @@ -93,7 +93,7 @@ public class AuthUtil { try { ugi.checkTGTAndReloginFromKeytab(); } catch (IOException e) { - LOG.info("Got exception while trying to refresh credentials "); + LOG.error("Got exception while trying to refresh credentials: " + e.getMessage(), e); } } }; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java index cbb7ff3162b..540c967546e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java @@ -244,7 +244,6 @@ public class CellComparator implements Comparator, Serializable { /** * Returns a hash code that is always the same for two Cells having a matching equals(..) result. - * Currently does not guard against nulls, but it could if necessary. */ public static int hashCode(Cell cell){ if (cell == null) {// return 0 for empty Cell @@ -258,8 +257,7 @@ public class CellComparator implements Comparator, Serializable { /** * Returns a hash code that is always the same for two Cells having a matching - * equals(..) result. Currently does not guard against nulls, but it could if - * necessary. Note : Ignore mvcc while calculating the hashcode + * equals(..) result. 
Note : Ignore mvcc while calculating the hashcode * * @param cell * @return hashCode diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java index fefe626d08e..bce39579d6a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java @@ -61,6 +61,10 @@ public final class CellUtil { cell.getQualifierLength()); } + public static ByteRange fillValueRange(Cell cell, ByteRange range) { + return range.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + } + public static ByteRange fillTagRange(Cell cell, ByteRange range) { return range.set(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength()); } @@ -564,6 +568,21 @@ public final class CellUtil { return estimatedSerializedSizeOf(cell); } + /** + * This is a hack that should be removed once we don't care about matching + * up client- and server-side estimations of cell size. It needed to be + * backwards compatible with estimations done by older clients. We need to + * pretend that tags never exist and cells aren't serialized with tag + * length included. See HBASE-13262 and HBASE-13303 + */ + @Deprecated + public static long estimatedHeapSizeOfWithoutTags(final Cell cell) { + if (cell instanceof KeyValue) { + return ((KeyValue)cell).heapSizeWithoutTags(); + } + return getSumOfCellKeyElementLengths(cell) + cell.getValueLength(); + } + /********************* tags *************************************/ /** * Util method to iterate through the tags diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java index 5e01c39bbd4..5e2e43ea3e0 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java @@ -131,10 +131,10 @@ public class ChoreService implements ChoreServicer { if (chore == null) return false; try { + chore.setChoreServicer(this); ScheduledFuture future = scheduler.scheduleAtFixedRate(chore, chore.getInitialDelay(), chore.getPeriod(), chore.getTimeUnit()); - chore.setChoreServicer(this); scheduledChores.put(chore, future); return true; } catch (Exception exception) { @@ -161,7 +161,7 @@ public class ChoreService implements ChoreServicer { @Override public synchronized void cancelChore(ScheduledChore chore) { - cancelChore(chore, false); + cancelChore(chore, true); } @Override diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java index 34465819661..1be66a12c59 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java @@ -71,7 +71,7 @@ public class HBaseConfiguration extends Configuration { String thisVersion = VersionInfo.getVersion(); if (!thisVersion.equals(defaultsVersion)) { throw new RuntimeException( - "hbase-default.xml file seems to be for and old version of HBase (" + + "hbase-default.xml file seems to be for an older version of HBase (" + defaultsVersion + "), this version is " + thisVersion); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 8a07397117d..fc65c474e6f 100644 --- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -206,6 +206,10 @@ public final class HConstants { public static final String ZOOKEEPER_DATA_DIR = ZK_CFG_PROPERTY_PREFIX + "dataDir"; + /** Parameter name for the ZK tick time */ + public static final String ZOOKEEPER_TICK_TIME = + ZK_CFG_PROPERTY_PREFIX + "tickTime"; + /** Default limit on concurrent client-side zookeeper connections */ public static final int DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS = 300; @@ -387,7 +391,7 @@ public final class HConstants { /** * The hbase:meta table's name. - * + * */ @Deprecated // for compat from 0.94 -> 0.96. public static final byte[] META_TABLE_NAME = TableName.META_TABLE_NAME.getName(); @@ -608,13 +612,20 @@ public final class HConstants { */ public static final UUID DEFAULT_CLUSTER_ID = new UUID(0L,0L); - /** - * Parameter name for maximum number of bytes returned when calling a - * scanner's next method. - */ + /** + * Parameter name for maximum number of bytes returned when calling a scanner's next method. + * Controlled by the client. + */ public static final String HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY = "hbase.client.scanner.max.result.size"; + /** + * Parameter name for maximum number of bytes returned when calling a scanner's next method. + * Controlled by the server. + */ + public static final String HBASE_SERVER_SCANNER_MAX_RESULT_SIZE_KEY = + "hbase.server.scanner.max.result.size"; + /** * Maximum number of bytes returned when calling a scanner's next method. * Note that when a single row is larger than this limit the row is still @@ -624,6 +635,16 @@ public final class HConstants { */ public static final long DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE = 2 * 1024 * 1024; + /** + * Maximum number of bytes returned when calling a scanner's next method. + * Note that when a single row is larger than this limit the row is still + * returned completely. + * Safety setting to protect the region server. + * + * The default value is 100MB. (a client would rarely request larger chunks on purpose) + */ + public static final long DEFAULT_HBASE_SERVER_SCANNER_MAX_RESULT_SIZE = 100 * 1024 * 1024; + /** * Parameter name for client pause value, used mostly as value to wait * before running a retry of a failed get, region lookup, etc. @@ -698,7 +719,7 @@ public final class HConstants { /** * Default value for {@link #HBASE_CLIENT_SCANNER_CACHING} */ - public static final int DEFAULT_HBASE_CLIENT_SCANNER_CACHING = 100; + public static final int DEFAULT_HBASE_CLIENT_SCANNER_CACHING = Integer.MAX_VALUE; /** * Parameter name for number of rows that will be fetched when calling next on @@ -939,7 +960,7 @@ public final class HConstants { * NONE: no preference in destination of replicas * ONE_SSD: place only one replica in SSD and the remaining in default storage * and ALL_SSD: place all replica on SSD - * + * * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html*/ public static final String WAL_STORAGE_POLICY = "hbase.wal.storage.policy"; public static final String DEFAULT_WAL_STORAGE_POLICY = "NONE"; @@ -953,7 +974,7 @@ public final class HConstants { * The byte array represents for NO_NEXT_INDEXED_KEY; * The actual value is irrelevant because this is always compared by reference. 
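The new HBASE_SERVER_SCANNER_MAX_RESULT_SIZE_KEY above complements the existing client-side key: the client (or an individual Scan) states how much data it wants per next() call, while the server enforces its own ceiling regardless of what the client asks for. A small sketch of setting the two knobs programmatically; in practice these values normally live in hbase-site.xml, and the snippet assumes hbase-client on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Scan;

public class ScannerResultSizeSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Client-side ceiling per next() call (2 MB is the default per the constants above).
    conf.setLong("hbase.client.scanner.max.result.size", 2L * 1024 * 1024);
    // Server-side safety net, applied no matter what the client requests
    // (100 MB default per DEFAULT_HBASE_SERVER_SCANNER_MAX_RESULT_SIZE).
    conf.setLong("hbase.server.scanner.max.result.size", 100L * 1024 * 1024);

    // A single Scan can also lower the limit for itself.
    Scan scan = new Scan();
    scan.setMaxResultSize(1024 * 1024);
    scan.setCaching(100);

    System.out.println("scan max result size: " + scan.getMaxResultSize());
  }
}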
*/ - public static final byte [] NO_NEXT_INDEXED_KEY = Bytes.toBytes("NO_NEXT_INDEXED_KEY"); + public static final Cell NO_NEXT_INDEXED_KEY = new KeyValue(); /** delimiter used between portions of a region name */ public static final int DELIMITER = ','; public static final String HBASE_CONFIG_READ_ZOOKEEPER_CONFIG = @@ -1051,6 +1072,9 @@ public final class HConstants { public static final long NO_NONCE = 0; + /** Default cipher for encryption */ + public static final String CIPHER_AES = "AES"; + /** Configuration key for the crypto algorithm provider, a class name */ public static final String CRYPTO_CIPHERPROVIDER_CONF_KEY = "hbase.crypto.cipherprovider"; @@ -1074,6 +1098,13 @@ public final class HConstants { /** Configuration key for the name of the master WAL encryption key for the cluster, a string */ public static final String CRYPTO_WAL_KEY_NAME_CONF_KEY = "hbase.crypto.wal.key.name"; + /** Configuration key for the algorithm used for creating jks key, a string */ + public static final String CRYPTO_KEY_ALGORITHM_CONF_KEY = "hbase.crypto.key.algorithm"; + + /** Configuration key for the name of the alternate cipher algorithm for the cluster, a string */ + public static final String CRYPTO_ALTERNATE_KEY_ALGORITHM_CONF_KEY = + "hbase.crypto.alternate.key.algorithm"; + /** Configuration key for enabling WAL encryption, a boolean */ public static final String ENABLE_WAL_ENCRYPTION = "hbase.regionserver.wal.encryption"; @@ -1126,7 +1157,7 @@ public final class HConstants { public static final String HBASE_CLIENT_FAST_FAIL_THREASHOLD_MS = "hbase.client.fastfail.threshold"; - + public static final long HBASE_CLIENT_FAST_FAIL_THREASHOLD_MS_DEFAULT = 60000; @@ -1137,7 +1168,7 @@ public final class HConstants { 600000; public static final String HBASE_CLIENT_FAST_FAIL_INTERCEPTOR_IMPL = - "hbase.client.fast.fail.interceptor.impl"; + "hbase.client.fast.fail.interceptor.impl"; /** Config key for if the server should send backpressure and if the client should listen to * that backpressure from the server */ diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java index 8566a8815cd..7de1f54b0fb 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -47,7 +47,7 @@ import org.apache.hadoop.io.RawComparator; import com.google.common.annotations.VisibleForTesting; /** - * An HBase Key/Value. This is the fundamental HBase Type. + * An HBase Key/Value. This is the fundamental HBase Type. *

* HBase applications and users should use the Cell interface and avoid directly using KeyValue * and member functions not defined in Cell. @@ -268,9 +268,9 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, //// // KeyValue core instance fields. - private byte [] bytes = null; // an immutable byte array that contains the KV - private int offset = 0; // offset into bytes buffer KV starts at - private int length = 0; // length of the KV starting from offset. + protected byte [] bytes = null; // an immutable byte array that contains the KV + protected int offset = 0; // offset into bytes buffer KV starts at + protected int length = 0; // length of the KV starting from offset. /** * @return True if a delete type, a {@link KeyValue.Type#Delete} or @@ -297,6 +297,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, return seqId; } + @Override public void setSequenceId(long seqId) { this.seqId = seqId; } @@ -577,7 +578,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, this(row, roffset, rlength, family, foffset, flength, qualifier, qoffset, qlength, timestamp, type, value, voffset, vlength, null); } - + /** * Constructs KeyValue structure filled with specified values. Uses the provided buffer as the * data buffer. @@ -742,9 +743,9 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, public KeyValue(Cell c) { this(c.getRowArray(), c.getRowOffset(), (int)c.getRowLength(), - c.getFamilyArray(), c.getFamilyOffset(), (int)c.getFamilyLength(), - c.getQualifierArray(), c.getQualifierOffset(), (int) c.getQualifierLength(), - c.getTimestamp(), Type.codeToType(c.getTypeByte()), c.getValueArray(), c.getValueOffset(), + c.getFamilyArray(), c.getFamilyOffset(), (int)c.getFamilyLength(), + c.getQualifierArray(), c.getQualifierOffset(), (int) c.getQualifierLength(), + c.getTimestamp(), Type.codeToType(c.getTypeByte()), c.getValueArray(), c.getValueOffset(), c.getValueLength(), c.getTagsArray(), c.getTagsOffset(), c.getTagsLength()); this.seqId = c.getSequenceId(); } @@ -955,7 +956,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, final int rlength, final byte [] family, final int foffset, int flength, final byte [] qualifier, final int qoffset, int qlength, final long timestamp, final Type type, - final byte [] value, final int voffset, + final byte [] value, final int voffset, int vlength, byte[] tags, int tagsOffset, int tagsLength) { checkParameters(row, rlength, family, flength, qlength, vlength); @@ -1063,15 +1064,12 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, return CellComparator.equals(this, (Cell)other); } + /** + * In line with {@link #equals(Object)}, only uses the key portion, not the value. 
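A small sketch of the contract the hashCode() change restores (not part of the patch; row and value literals are illustrative): equals() already compares only the key portion, so two KeyValues that differ only in value must hash alike as well.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class KeyValueHashSketch {
  public static void main(String[] args) {
    byte[] row = Bytes.toBytes("r1");
    byte[] fam = Bytes.toBytes("f");
    byte[] qual = Bytes.toBytes("q");
    KeyValue a = new KeyValue(row, fam, qual, 1L, Bytes.toBytes("value-A"));
    KeyValue b = new KeyValue(row, fam, qual, 1L, Bytes.toBytes("value-B"));
    // Same key, different values: equal per equals(), so hashCode() must match too.
    System.out.println(a.equals(b));                   // true
    System.out.println(a.hashCode() == b.hashCode());  // true after this patch
  }
}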
+ */ @Override public int hashCode() { - byte[] b = getBuffer(); - int start = getOffset(), end = getOffset() + getLength(); - int h = b[start++]; - for (int i = start; i < end; i++) { - h = (h * 13) ^ b[i]; - } - return h; + return CellComparator.hashCodeIgnoreMvcc(this); } //--------------------------------------------------------------------------- @@ -1115,6 +1113,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, // //--------------------------------------------------------------------------- + @Override public String toString() { if (this.bytes == null || this.bytes.length == 0) { return "empty"; @@ -1125,10 +1124,10 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, /** * @param k Key portion of a KeyValue. - * @return Key as a String, empty string if k is null. + * @return Key as a String, empty string if k is null. */ public static String keyToString(final byte [] k) { - if (k == null) { + if (k == null) { return ""; } return keyToString(k, 0, k.length); @@ -1464,6 +1463,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * save on allocations. * @return Value in a new byte array. */ + @Override @Deprecated // use CellUtil.getValueArray() public byte [] getValue() { return CellUtil.cloneValue(this); @@ -1477,6 +1477,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * lengths instead. * @return Row in a new byte array. */ + @Override @Deprecated // use CellUtil.getRowArray() public byte [] getRow() { return CellUtil.cloneRow(this); @@ -1534,6 +1535,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * lengths instead. * @return Returns family. Makes a copy. */ + @Override @Deprecated // use CellUtil.getFamilyArray public byte [] getFamily() { return CellUtil.cloneFamily(this); @@ -1548,6 +1550,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * Use {@link #getBuffer()} with appropriate offsets and lengths instead. * @return Returns qualifier. Makes a copy. */ + @Override @Deprecated // use CellUtil.getQualifierArray public byte [] getQualifier() { return CellUtil.cloneQualifier(this); @@ -1846,7 +1849,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, return compareFlatKey(l,loff,llen, r,roff,rlen); } - + /** * Compares the only the user specified portion of a Key. This is overridden by MetaComparator. * @param left @@ -1890,6 +1893,58 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, return compareFlatKey(left, 0, left.length, right, 0, right.length); } + // compare a key against row/fam/qual/ts/type + public int compareKey(Cell cell, + byte[] row, int roff, int rlen, + byte[] fam, int foff, int flen, + byte[] col, int coff, int clen, + long ts, byte type) { + + int compare = compareRows( + cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), + row, roff, rlen); + if (compare != 0) { + return compare; + } + // If the column is not specified, the "minimum" key type appears the + // latest in the sorted order, regardless of the timestamp. This is used + // for specifying the last key/value in a given row, because there is no + // "lexicographically last column" (it would be infinitely long). The + // "maximum" key type does not need this behavior. + if (cell.getFamilyLength() + cell.getQualifierLength() == 0 + && cell.getTypeByte() == Type.Minimum.getCode()) { + // left is "bigger", i.e. 
it appears later in the sorted order + return 1; + } + if (flen+clen == 0 && type == Type.Minimum.getCode()) { + return -1; + } + + compare = compareFamilies( + cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), + fam, foff, flen); + if (compare != 0) { + return compare; + } + compare = compareColumns( + cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), + col, coff, clen); + if (compare != 0) { + return compare; + } + // Next compare timestamps. + compare = compareTimestamps(cell.getTimestamp(), ts); + if (compare != 0) { + return compare; + } + + // Compare types. Let the delete types sort ahead of puts; i.e. types + // of higher numbers sort before those of lesser numbers. Maximum (255) + // appears ahead of everything, and minimum (0) appears after + // everything. + return (0xff & type) - (0xff & cell.getTypeByte()); + } + public int compareOnlyKeyPortion(Cell left, Cell right) { return CellComparator.compare(left, right, true); } @@ -2214,7 +2269,8 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * @param leftKey * @param rightKey * @return 0 if equal, <0 if left smaller, >0 if right smaller - * @deprecated Since 0.99.2; Use {@link CellComparator#getMidpoint(Cell, Cell)} instead. + * @deprecated Since 0.99.2; Use + * {@link CellComparator#getMidpoint(KeyValue.KVComparator, Cell, Cell) instead} */ @Deprecated public byte[] getShortMidpointKey(final byte[] leftKey, final byte[] rightKey) { @@ -2354,7 +2410,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, in.readFully(bytes); return new KeyValue(bytes, 0, length); } - + /** * Create a new KeyValue by copying existing cell and adding new tags * @param c @@ -2370,9 +2426,9 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, existingTags = newTags; } return new KeyValue(c.getRowArray(), c.getRowOffset(), (int)c.getRowLength(), - c.getFamilyArray(), c.getFamilyOffset(), (int)c.getFamilyLength(), - c.getQualifierArray(), c.getQualifierOffset(), (int) c.getQualifierLength(), - c.getTimestamp(), Type.codeToType(c.getTypeByte()), c.getValueArray(), c.getValueOffset(), + c.getFamilyArray(), c.getFamilyOffset(), (int)c.getFamilyLength(), + c.getQualifierArray(), c.getQualifierOffset(), (int) c.getQualifierLength(), + c.getTimestamp(), Type.codeToType(c.getTypeByte()), c.getValueArray(), c.getValueOffset(), c.getValueLength(), existingTags); } @@ -2477,6 +2533,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, this.comparator = c; } + @Override public int compare(KeyValue left, KeyValue right) { return comparator.compareRows(left, right); } @@ -2485,7 +2542,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, /** * Avoids redundant comparisons for better performance. - * + * * TODO get rid of this wart */ public interface SamePrefixComparator { @@ -2508,6 +2565,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * TODO: With V3 consider removing this. * @return legacy class name for FileFileTrailer#comparatorClassName */ + @Override public String getLegacyKeyComparatorName() { return "org.apache.hadoop.hbase.util.Bytes$ByteArrayComparator"; } @@ -2515,6 +2573,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, /** * @deprecated Since 0.99.2. 
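A quick sketch of calling the new KVComparator#compareKey overload against a synthetic row/family/qualifier key (row literals are illustrative, not part of this patch):

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class CompareKeySketch {
  public static void main(String[] args) {
    KeyValue.KVComparator comparator = new KeyValue.KVComparator();
    KeyValue cell = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f"),
        Bytes.toBytes("q"), 5L, Bytes.toBytes("v"));
    byte[] row = Bytes.toBytes("row2");
    // Compare the cell against the first possible key of "row2" (empty family/qualifier,
    // latest timestamp, Maximum type): the rows differ, so the row comparison decides.
    int cmp = comparator.compareKey(cell,
        row, 0, row.length,
        HConstants.EMPTY_BYTE_ARRAY, 0, 0,
        HConstants.EMPTY_BYTE_ARRAY, 0, 0,
        HConstants.LATEST_TIMESTAMP, KeyValue.Type.Maximum.getCode());
    System.out.println(cmp < 0); // true: "row1" sorts before "row2"
  }
}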
*/ + @Override @Deprecated public int compareFlatKey(byte[] left, int loffset, int llength, byte[] right, int roffset, int rlength) { @@ -2526,6 +2585,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, return compareOnlyKeyPortion(left, right); } + @Override @VisibleForTesting public int compareOnlyKeyPortion(Cell left, Cell right) { int c = Bytes.BYTES_RAWCOMPARATOR.compare(left.getRowArray(), left.getRowOffset(), @@ -2552,6 +2612,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, return (0xff & left.getTypeByte()) - (0xff & right.getTypeByte()); } + @Override public byte[] calcIndexKey(byte[] lastKeyOfPreviousBlock, byte[] firstKeyInBlock) { return firstKeyInBlock; } @@ -2576,6 +2637,27 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, return ClassSize.align(sum); } + /** + * This is a hack that should be removed once we don't care about matching + * up client- and server-side estimations of cell size. It needed to be + * backwards compatible with estimations done by older clients. We need to + * pretend that tags never exist and KeyValues aren't serialized with tag + * length included. See HBASE-13262 and HBASE-13303 + */ + @Deprecated + public long heapSizeWithoutTags() { + int sum = 0; + sum += ClassSize.OBJECT;// the KeyValue object itself + sum += ClassSize.REFERENCE;// pointer to "bytes" + sum += ClassSize.align(ClassSize.ARRAY);// "bytes" + sum += KeyValue.KEYVALUE_INFRASTRUCTURE_SIZE; + sum += getKeyLength(); + sum += getValueLength(); + sum += 2 * Bytes.SIZEOF_INT;// offset, length + sum += Bytes.SIZEOF_LONG;// memstoreTS + return ClassSize.align(sum); + } + /** * A simple form of KeyValue that creates a keyvalue with only the key part of the byte[] * Mainly used in places where we need to compare two cells. 
Avoids copying of bytes @@ -2583,16 +2665,15 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * Hence create a Keyvalue(aka Cell) that would help in comparing as two cells */ public static class KeyOnlyKeyValue extends KeyValue { - private int length = 0; - private int offset = 0; - private byte[] b; - public KeyOnlyKeyValue() { } + public KeyOnlyKeyValue(byte[] b) { + this(b, 0, b.length); + } public KeyOnlyKeyValue(byte[] b, int offset, int length) { - this.b = b; + this.bytes = b; this.length = length; this.offset = offset; } @@ -2610,7 +2691,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * @param length */ public void setKey(byte[] key, int offset, int length) { - this.b = key; + this.bytes = key; this.offset = offset; this.length = length; } @@ -2619,13 +2700,13 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, public byte[] getKey() { int keylength = getKeyLength(); byte[] key = new byte[keylength]; - System.arraycopy(this.b, getKeyOffset(), key, 0, keylength); + System.arraycopy(this.bytes, getKeyOffset(), key, 0, keylength); return key; } @Override public byte[] getRowArray() { - return b; + return bytes; } @Override @@ -2635,12 +2716,12 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, @Override public byte[] getFamilyArray() { - return b; + return bytes; } @Override public byte getFamilyLength() { - return this.b[getFamilyOffset() - 1]; + return this.bytes[getFamilyOffset() - 1]; } @Override @@ -2650,7 +2731,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, @Override public byte[] getQualifierArray() { - return b; + return bytes; } @Override @@ -2670,12 +2751,12 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, @Override public short getRowLength() { - return Bytes.toShort(this.b, getKeyOffset()); + return Bytes.toShort(this.bytes, getKeyOffset()); } @Override public byte getTypeByte() { - return this.b[this.offset + getKeyLength() - 1]; + return this.bytes[this.offset + getKeyLength() - 1]; } private int getQualifierLength(int rlength, int flength) { @@ -2685,7 +2766,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, @Override public long getTimestamp() { int tsOffset = getTimestampOffset(); - return Bytes.toLong(this.b, tsOffset); + return Bytes.toLong(this.bytes, tsOffset); } @Override @@ -2725,10 +2806,10 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, @Override public String toString() { - if (this.b == null || this.b.length == 0) { + if (this.bytes == null || this.bytes.length == 0) { return "empty"; } - return keyToString(this.b, this.offset, getKeyLength()) + "/vlen=0/mvcc=0"; + return keyToString(this.bytes, this.offset, getKeyLength()) + "/vlen=0/mvcc=0"; } @Override diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java index dde15bca82e..7cbfdd60d75 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java @@ -574,7 +574,7 @@ public class KeyValueUtil { // write value out.write(cell.getValueArray(), cell.getValueOffset(), vlen); // write tags if we have to - if (withTags) { + if (withTags && tlen > 0) { // 2 bytes tags length followed by tags bytes // tags length is serialized with 2 bytes only(short way) 
even if the type is int. As this // is non -ve numbers, we save the sign bit. See HBASE-11437 diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java index ccedcc76bce..16d080b4692 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ScheduledChore.java @@ -164,26 +164,19 @@ public abstract class ScheduledChore implements Runnable { this.timeUnit = unit; } - synchronized void resetState() { - timeOfLastRun = -1; - timeOfThisRun = -1; - initialChoreComplete = false; - } - /** * @see java.lang.Thread#run() */ @Override - public synchronized void run() { - timeOfLastRun = timeOfThisRun; - timeOfThisRun = System.currentTimeMillis(); + public void run() { + updateTimeTrackingBeforeRun(); if (missedStartTime() && isScheduled()) { - choreServicer.onChoreMissedStartTime(this); + onChoreMissedStartTime(); if (LOG.isInfoEnabled()) LOG.info("Chore: " + getName() + " missed its start time"); - } else if (stopper.isStopped() || choreServicer == null || !isScheduled()) { - cancel(); + } else if (stopper.isStopped() || !isScheduled()) { + cancel(false); cleanup(); - LOG.info("Chore: " + getName() + " was stopped"); + if (LOG.isInfoEnabled()) LOG.info("Chore: " + getName() + " was stopped"); } else { try { if (!initialChoreComplete) { @@ -192,15 +185,33 @@ public abstract class ScheduledChore implements Runnable { chore(); } } catch (Throwable t) { - LOG.error("Caught error", t); + if (LOG.isErrorEnabled()) LOG.error("Caught error", t); if (this.stopper.isStopped()) { - cancel(); + cancel(false); cleanup(); } } } } + /** + * Update our time tracking members. Called at the start of an execution of this chore's run() + * method so that a correct decision can be made as to whether or not we missed the start time + */ + private synchronized void updateTimeTrackingBeforeRun() { + timeOfLastRun = timeOfThisRun; + timeOfThisRun = System.currentTimeMillis(); + } + + /** + * Notify the ChoreService that this chore has missed its start time. Allows the ChoreService to + * make the decision as to whether or not it would be worthwhile to increase the number of core + * pool threads + */ + private synchronized void onChoreMissedStartTime() { + if (choreServicer != null) choreServicer.onChoreMissedStartTime(this); + } + /** * @return How long has it been since this chore last run. Useful for checking if the chore has * missed its scheduled start time by too large of a margin @@ -248,7 +259,7 @@ public abstract class ScheduledChore implements Runnable { } public synchronized void cancel() { - cancel(false); + cancel(true); } public synchronized void cancel(boolean mayInterruptIfRunning) { @@ -317,7 +328,7 @@ public abstract class ScheduledChore implements Runnable { * Override to run a task before we start looping. 
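A minimal sketch of the chore lifecycle the reworked run() drives: optional initialChore(), then chore() once per period, then cancel(). The Stoppable stand-in and all names are illustrative, not part of this patch.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class HeartbeatChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped = false;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ScheduledChore chore = new ScheduledChore("heartbeat", stopper, 1000) {
      @Override protected boolean initialChore() { return true; } // no longer synchronized
      @Override protected void chore() { System.out.println("tick"); }
    };
    ChoreService service = new ChoreService("sketch");
    service.scheduleChore(chore);
    Thread.sleep(3000);
    chore.cancel();      // cancel() now interrupts a running chore by default
    service.shutdown();
  }
}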
* @return true if initial chore was successful */ - protected synchronized boolean initialChore() { + protected boolean initialChore() { // Default does nothing return true; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/Stoppable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/Stoppable.java index b87b7649d34..fc83ba355d2 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/Stoppable.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/Stoppable.java @@ -19,11 +19,13 @@ package org.apache.hadoop.hbase; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; /** * Implementers are Stoppable. */ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving public interface Stoppable { /** * Stop this service. diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java index c560a4352e6..17fd3b7e68b 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java @@ -180,10 +180,11 @@ public final class TableName implements Comparable { } if (qualifierName[start] == '.' || qualifierName[start] == '-') { - throw new IllegalArgumentException("Illegal first character <" + qualifierName[0] + - "> at 0. Namespaces can only start with alphanumeric " + + throw new IllegalArgumentException("Illegal first character <" + qualifierName[start] + + "> at 0. " + (isSnapshot ? "Snapshot" : "User-space table") + + " qualifiers can only start with 'alphanumeric " + "characters': i.e. [a-zA-Z_0-9]: " + - Bytes.toString(qualifierName)); + Bytes.toString(qualifierName, start, end)); } for (int i = start; i < end; i++) { if (Character.isLetterOrDigit(qualifierName[i]) || @@ -194,7 +195,7 @@ public final class TableName implements Comparable { } throw new IllegalArgumentException("Illegal character code:" + qualifierName[i] + ", <" + (char) qualifierName[i] + "> at " + i + - ". " + (isSnapshot ? "snapshot" : "User-space table") + + ". " + (isSnapshot ? "Snapshot" : "User-space table") + " qualifiers can only contain " + "'alphanumeric characters': i.e. [a-zA-Z_0-9-.]: " + Bytes.toString(qualifierName, start, end)); @@ -207,21 +208,21 @@ public final class TableName implements Comparable { /** * Valid namespace characters are [a-zA-Z_0-9] */ - public static void isLegalNamespaceName(byte[] namespaceName, int offset, int length) { - for (int i = offset; i < length; i++) { + public static void isLegalNamespaceName(final byte[] namespaceName, + final int start, + final int end) { + if(end - start < 1) { + throw new IllegalArgumentException("Namespace name must not be empty"); + } + for (int i = start; i < end; i++) { if (Character.isLetterOrDigit(namespaceName[i])|| namespaceName[i] == '_') { continue; } throw new IllegalArgumentException("Illegal character <" + namespaceName[i] + "> at " + i + ". Namespaces can only contain " + "'alphanumeric characters': i.e. [a-zA-Z_0-9]: " + Bytes.toString(namespaceName, - offset, length)); + start, end)); } - if(offset == length) - throw new IllegalArgumentException("Illegal character <" + namespaceName[offset] + - "> at " + offset + ". Namespaces can only contain " + - "'alphanumeric characters': i.e. 
[a-zA-Z_0-9]: " + Bytes.toString(namespaceName, - offset, length)); } public byte[] getName() { @@ -240,6 +241,19 @@ public final class TableName implements Comparable { return namespaceAsString; } + /** + * Ideally, getNameAsString should contain namespace within it, + * but if the namespace is default, it just returns the name. This method + * takes care of this corner case. + */ + public String getNameWithNamespaceInclAsString() { + if(getNamespaceAsString().equals(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR)) { + return NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR + + TableName.NAMESPACE_DELIM + getNameAsString(); + } + return getNameAsString(); + } + public byte[] getQualifier() { return qualifier; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionStateListener.java~HEAD b/hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/TimeoutIOException.java similarity index 51% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionStateListener.java~HEAD rename to hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/TimeoutIOException.java index 6b954ac3708..4e1ee39d9f0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionStateListener.java~HEAD +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/exceptions/TimeoutIOException.java @@ -15,40 +15,32 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hbase.quotas; + +package org.apache.hadoop.hbase.exceptions; import java.io.IOException; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.classification.InterfaceAudience; /** - * The listener interface for receiving region state events. + * Exception thrown when a blocking operation times out. */ +@SuppressWarnings("serial") @InterfaceAudience.Private -public interface RegionStateListener { +public class TimeoutIOException extends IOException { + public TimeoutIOException() { + super(); + } - /** - * Process region split event. - * - * @param hri An instance of HRegionInfo - * @throws IOException - */ - void onRegionSplit(HRegionInfo hri) throws IOException; + public TimeoutIOException(final String message) { + super(message); + } - /** - * Process region split reverted event. - * - * @param hri An instance of HRegionInfo - * @throws IOException Signals that an I/O exception has occurred. - */ - void onRegionSplitReverted(HRegionInfo hri) throws IOException; + public TimeoutIOException(final String message, final Throwable t) { + super(message, t); + } - /** - * Process region merge event. - * - * @param hri An instance of HRegionInfo - * @throws IOException - */ - void onRegionMerged(HRegionInfo hri) throws IOException; + public TimeoutIOException(final Throwable t) { + super(t); + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java new file mode 100644 index 00000000000..bff0c7734c5 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/BoundedByteBufferPool.java @@ -0,0 +1,113 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.io; + +import java.nio.ByteBuffer; +import java.util.Queue; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +import com.google.common.annotations.VisibleForTesting; + +/** + * Like Hadoops' ByteBufferPool only you do not specify desired size when getting a ByteBuffer. + * This pool keeps an upper bound on the count of ByteBuffers in the pool and on the maximum size + * of ByteBuffer that it will retain (Hence the pool is 'bounded' as opposed to, say, + * Hadoop's ElasticByteBuffferPool). + * If a ByteBuffer is bigger than the configured threshold, we will just let the ByteBuffer go + * rather than add it to the pool. If more ByteBuffers than the configured maximum instances, + * we will not add the passed ByteBuffer to the pool; we will just drop it + * (we will log a WARN in this case that we are at capacity). + * + *

The intended use case is a reservoir of bytebuffers that an RPC can reuse; buffers tend to + * achieve a particular 'run' size over time give or take a few extremes. Set TRACE level on this + * class for a couple of seconds to get reporting on how it is running when deployed. + * + *

This class is thread safe. + */ +@InterfaceAudience.Private +public class BoundedByteBufferPool { + private final Log LOG = LogFactory.getLog(this.getClass()); + + @VisibleForTesting + final Queue buffers; + + // Maximum size of a ByteBuffer to retain in pool + private final int maxByteBufferSizeToCache; + + // A running average only it only rises, it never recedes + private volatile int runningAverage; + + // Scratch that keeps rough total size of pooled bytebuffers + private volatile int totalReservoirCapacity; + + // For reporting + private AtomicLong allocations = new AtomicLong(0); + + /** + * @param maxByteBufferSizeToCache + * @param initialByteBufferSize + * @param maxToCache + */ + public BoundedByteBufferPool(final int maxByteBufferSizeToCache, final int initialByteBufferSize, + final int maxToCache) { + this.maxByteBufferSizeToCache = maxByteBufferSizeToCache; + this.runningAverage = initialByteBufferSize; + this.buffers = new ArrayBlockingQueue(maxToCache, true); + } + + public ByteBuffer getBuffer() { + ByteBuffer bb = this.buffers.poll(); + if (bb != null) { + // Clear sets limit == capacity. Postion == 0. + bb.clear(); + this.totalReservoirCapacity -= bb.capacity(); + } else { + bb = ByteBuffer.allocate(this.runningAverage); + this.allocations.incrementAndGet(); + } + if (LOG.isTraceEnabled()) { + LOG.trace("runningAverage=" + this.runningAverage + + ", totalCapacity=" + this.totalReservoirCapacity + ", count=" + this.buffers.size() + + ", alloctions=" + this.allocations.get()); + } + return bb; + } + + public void putBuffer(ByteBuffer bb) { + // If buffer is larger than we want to keep around, just let it go. + if (bb.capacity() > this.maxByteBufferSizeToCache) return; + if (!this.buffers.offer(bb)) { + LOG.warn("At capacity: " + this.buffers.size()); + } else { + int size = this.buffers.size(); // This size may be inexact. + this.totalReservoirCapacity += bb.capacity(); + int average = 0; + if (size != 0) { + average = this.totalReservoirCapacity / size; + } + if (average > this.runningAverage && average < this.maxByteBufferSizeToCache) { + this.runningAverage = average; + } + } + } +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java index 257b850572d..af12113137f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java @@ -43,17 +43,32 @@ public class ByteBufferOutputStream extends OutputStream { } public ByteBufferOutputStream(int capacity, boolean useDirectByteBuffer) { - if (useDirectByteBuffer) { - buf = ByteBuffer.allocateDirect(capacity); - } else { - buf = ByteBuffer.allocate(capacity); - } + this(allocate(capacity, useDirectByteBuffer)); + } + + /** + * @param bb ByteBuffer to use. If too small, will be discarded and a new one allocated in its + * place; i.e. the passed in BB may NOT BE RETURNED!! Minimally it will be altered. SIDE EFFECT!! + * If you want to get the newly allocated ByteBuffer, you'll need to pick it up when + * done with this instance by calling {@link #getByteBuffer()}. All this encapsulation violation + * is so we can recycle buffers rather than allocate each time; it can get expensive especially + * if the buffers are big doing allocations each time or having them undergo resizing because + * initial allocation was small. 
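A short sketch of the reuse pattern that BoundedByteBufferPool and the new ByteBufferOutputStream(ByteBuffer) constructor enable together (sizes and the payload are illustrative, not part of this patch):

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.io.BoundedByteBufferPool;
import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
import org.apache.hadoop.hbase.util.Bytes;

public class ReservoirSketch {
  public static void main(String[] args) throws Exception {
    // Keep at most 10 buffers, none bigger than 1MB, starting at 64KB each.
    BoundedByteBufferPool reservoir = new BoundedByteBufferPool(1024 * 1024, 64 * 1024, 10);
    ByteBuffer bb = reservoir.getBuffer();
    ByteBufferOutputStream bbos = new ByteBufferOutputStream(bb);
    bbos.write(Bytes.toBytes("some rpc response"));
    ByteBuffer result = bbos.getByteBuffer(); // flipped, ready to write out
    // ... send 'result' ...
    // Return whichever buffer the stream ended up with; it may not be 'bb' if it grew.
    reservoir.putBuffer(result);
  }
}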
+ * @see #getByteBuffer() + */ + public ByteBufferOutputStream(final ByteBuffer bb) { + this.buf = bb; + this.buf.clear(); } public int size() { return buf.position(); } + private static ByteBuffer allocate(final int capacity, final boolean useDirectByteBuffer) { + return useDirectByteBuffer? ByteBuffer.allocateDirect(capacity): ByteBuffer.allocate(capacity); + } + /** * This flips the underlying BB so be sure to use it _last_! * @return ByteBuffer @@ -70,12 +85,7 @@ public class ByteBufferOutputStream extends OutputStream { int newSize = (int)Math.min((((long)buf.capacity()) * 2), (long)(Integer.MAX_VALUE)); newSize = Math.max(newSize, buf.position() + extra); - ByteBuffer newBuf = null; - if (buf.isDirect()) { - newBuf = ByteBuffer.allocateDirect(newSize); - } else { - newBuf = ByteBuffer.allocate(newSize); - } + ByteBuffer newBuf = allocate(newSize, buf.isDirect()); buf.flip(); newBuf.put(buf); buf = newBuf; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java index 8c16389aaf0..8352e4eff8f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java @@ -166,6 +166,7 @@ public class TimeRange { * 1 if timestamp is greater than timerange */ public int compare(long timestamp) { + if (allTime) return 0; if (timestamp < minStamp) { return -1; } else if (timestamp >= maxStamp) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java index 3420d0a628f..ad89ca054c7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/Encryption.java @@ -469,9 +469,8 @@ public final class Encryption { * @param iv the initialization vector, can be null * @throws IOException */ - public static void decryptWithSubjectKey(OutputStream out, InputStream in, - int outLen, String subject, Configuration conf, Cipher cipher, - byte[] iv) throws IOException { + public static void decryptWithSubjectKey(OutputStream out, InputStream in, int outLen, + String subject, Configuration conf, Cipher cipher, byte[] iv) throws IOException { Key key = getSecretKeyForSubject(subject, conf); if (key == null) { throw new IOException("No key found for subject '" + subject + "'"); @@ -479,7 +478,31 @@ public final class Encryption { Decryptor d = cipher.getDecryptor(); d.setKey(key); d.setIv(iv); // can be null - decrypt(out, in, outLen, d); + try { + decrypt(out, in, outLen, d); + } catch (IOException e) { + // If the current cipher algorithm fails to unwrap, try the alternate cipher algorithm, if one + // is configured + String alternateAlgorithm = conf.get(HConstants.CRYPTO_ALTERNATE_KEY_ALGORITHM_CONF_KEY); + if (alternateAlgorithm != null) { + if (LOG.isDebugEnabled()) { + LOG.debug("Unable to decrypt data with current cipher algorithm '" + + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES) + + "'. 
Trying with the alternate cipher algorithm '" + alternateAlgorithm + + "' configured."); + } + Cipher alterCipher = Encryption.getCipher(conf, alternateAlgorithm); + if (alterCipher == null) { + throw new RuntimeException("Cipher '" + alternateAlgorithm + "' not available"); + } + d = alterCipher.getDecryptor(); + d.setKey(key); + d.setIv(iv); // can be null + decrypt(out, in, outLen, d); + } else { + throw new IOException(e); + } + } } private static ClassLoader getClassLoaderForClass(Class c) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java index be8c1927739..5b049fdb66b 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java @@ -676,11 +676,6 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder { } } - @Override - public int seekToKeyInBlock(byte[] key, int offset, int length, boolean seekBefore) { - return seekToKeyInBlock(new KeyValue.KeyOnlyKeyValue(key, offset, length), seekBefore); - } - @Override public int seekToKeyInBlock(Cell seekCell, boolean seekBefore) { int rowCommonPrefix = 0; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java index 872c22c5490..8073e548a2c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java @@ -172,27 +172,6 @@ public interface DataBlockEncoder { */ boolean next(); - /** - * Moves the seeker position within the current block to: - *

    - *
- * <ul>
- * <li>the last key that that is less than or equal to the given key if
- * seekBefore is false</li>
- * <li>the last key that is strictly less than the given key if
- * seekBefore is true. The caller is responsible for loading the
- * previous block if the requested key turns out to be the first key of the
- * current block.</li>
- * </ul>
- * @param key byte array containing the key - * @param offset key position the array - * @param length key length in bytes - * @param seekBefore find the key strictly less than the given key in case - * of an exact match. Does not matter in case of an inexact match. - * @return 0 on exact match, 1 on inexact match. - */ - @Deprecated - int seekToKeyInBlock( - byte[] key, int offset, int length, boolean seekBefore - ); /** * Moves the seeker position within the current block to: *
    diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java index 314ed2b3aa8..0b442a58fc0 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java @@ -120,7 +120,7 @@ public class StreamUtils { /** * Reads a varInt value stored in an array. - * + * * @param input * Input array where the varInt is available * @param offset @@ -198,4 +198,14 @@ public class StreamUtils { out.write((byte) (0xff & (v >> 8))); out.write((byte) (0xff & v)); } + + public static long readLong(InputStream in) throws IOException { + long result = 0; + for (int shift = 56; shift >= 0; shift -= 8) { + long x = in.read(); + if (x < 0) throw new IOException("EOF"); + result |= (x << shift); + } + return result; + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java index a5ac51a78dc..58a3c665f36 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java @@ -52,6 +52,8 @@ import org.apache.hadoop.security.token.TokenIdentifier; public abstract class User { public static final String HBASE_SECURITY_CONF_KEY = "hbase.security.authentication"; + public static final String HBASE_SECURITY_AUTHORIZATION_CONF_KEY = + "hbase.security.authorization"; protected UserGroupInformation ugi; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java index bec35ee7680..8096178a920 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java @@ -1364,14 +1364,26 @@ public class Bytes implements Comparable { } /** - * Reads a zero-compressed encoded long from input stream and returns it. + * Reads a zero-compressed encoded long from input buffer and returns it. * @param buffer Binary array * @param offset Offset into array at which vint begins. * @throws java.io.IOException e - * @return deserialized long from stream. + * @return deserialized long from buffer. + * @deprecated Use {@link #readAsVLong()} instead. */ + @Deprecated public static long readVLong(final byte [] buffer, final int offset) throws IOException { + return readAsVLong(buffer, offset); + } + + /** + * Reads a zero-compressed encoded long from input buffer and returns it. + * @param buffer Binary array + * @param offset Offset into array at which vint begins. + * @return deserialized long from buffer. + */ + public static long readAsVLong(final byte [] buffer, final int offset) { byte firstByte = buffer[offset]; int len = WritableUtils.decodeVIntSize(firstByte); if (len == 1) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java new file mode 100644 index 00000000000..a0006eddcfe --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java @@ -0,0 +1,109 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage; +import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage; +import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage; + +/** + * Helper to convert Exceptions and StackTraces from/to protobuf. + * (see ErrorHandling.proto for the internal of the proto messages) + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class ForeignExceptionUtil { + private ForeignExceptionUtil() { } + + public static IOException toIOException(final ForeignExceptionMessage eem) { + GenericExceptionMessage gem = eem.getGenericException(); + StackTraceElement[] trace = toStackTrace(gem.getTraceList()); + RemoteException re = new RemoteException(gem.getClassName(), gem.getMessage()); + re.setStackTrace(trace); + return re.unwrapRemoteException(); + } + + public static ForeignExceptionMessage toProtoForeignException(String source, Throwable t) { + GenericExceptionMessage.Builder gemBuilder = GenericExceptionMessage.newBuilder(); + gemBuilder.setClassName(t.getClass().getName()); + if (t.getMessage() != null) { + gemBuilder.setMessage(t.getMessage()); + } + // set the stack trace, if there is one + List stack = toProtoStackTraceElement(t.getStackTrace()); + if (stack != null) { + gemBuilder.addAllTrace(stack); + } + GenericExceptionMessage payload = gemBuilder.build(); + ForeignExceptionMessage.Builder exception = ForeignExceptionMessage.newBuilder(); + exception.setGenericException(payload).setSource(source); + return exception.build(); + } + + /** + * Convert a stack trace to list of {@link StackTraceElement}. + * @param trace the stack trace to convert to protobuf message + * @return null if the passed stack is null. + */ + public static List toProtoStackTraceElement(StackTraceElement[] trace) { + // if there is no stack trace, ignore it and just return the message + if (trace == null) return null; + // build the stack trace for the message + List pbTrace = new ArrayList(trace.length); + for (StackTraceElement elem : trace) { + StackTraceElementMessage.Builder stackBuilder = StackTraceElementMessage.newBuilder(); + stackBuilder.setDeclaringClass(elem.getClassName()); + if (elem.getFileName() != null) { + stackBuilder.setFileName(elem.getFileName()); + } + stackBuilder.setLineNumber(elem.getLineNumber()); + stackBuilder.setMethodName(elem.getMethodName()); + pbTrace.add(stackBuilder.build()); + } + return pbTrace; + } + + /** + * Unwind a serialized array of {@link StackTraceElementMessage}s to a + * {@link StackTraceElement}s. 
+ * @param traceList list that was serialized + * @return the deserialized list or null if it couldn't be unwound (e.g. wasn't set on + * the sender). + */ + public static StackTraceElement[] toStackTrace(List traceList) { + if (traceList == null || traceList.size() == 0) { + return new StackTraceElement[0]; // empty array + } + StackTraceElement[] trace = new StackTraceElement[traceList.size()]; + for (int i = 0; i < traceList.size(); i++) { + StackTraceElementMessage elem = traceList.get(i); + trace[i] = new StackTraceElement( + elem.getDeclaringClass(), elem.getMethodName(), + elem.hasFileName() ? elem.getFileName() : null, + elem.getLineNumber()); + } + return trace; + } +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Hash.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Hash.java index 82cf5c42553..34d9f9007b9 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Hash.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Hash.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; /** * This class represents a common API for hashing functions. */ -@InterfaceAudience.Public +@InterfaceAudience.Private @InterfaceStability.Stable public abstract class Hash { /** Constant to denote invalid hash type. */ diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java index 359e7a90c2a..8ee214d398f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; * @see Has update on the * Dr. Dobbs Article */ -@InterfaceAudience.Public +@InterfaceAudience.Private @InterfaceStability.Stable public class JenkinsHash extends Hash { private static final int BYTE_MASK = 0xff; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java index 3663d419747..5c27386af19 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; *
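A small sketch of the round trip ForeignExceptionUtil supports, e.g. shipping a failure from a remote worker back to a coordinator; the source name and message are illustrative, not part of this patch.

import java.io.IOException;
import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage;
import org.apache.hadoop.hbase.util.ForeignExceptionUtil;

public class ForeignExceptionRoundTripSketch {
  public static void main(String[] args) {
    IOException original = new IOException("snapshot failed on member-1");
    // Serialize class name, message and stack trace into the protobuf message...
    ForeignExceptionMessage msg =
        ForeignExceptionUtil.toProtoForeignException("member-1", original);
    // ...and rebuild an IOException on the receiving side.
    IOException rebuilt = ForeignExceptionUtil.toIOException(msg);
    System.out.println(rebuilt.getClass().getName() + ": " + rebuilt.getMessage());
  }
}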

    The C version of MurmurHash 2.0 found at that site was ported * to Java by Andrzej Bialecki (ab at getopt org).

    */ -@InterfaceAudience.Public +@InterfaceAudience.Private @InterfaceStability.Stable public class MurmurHash extends Hash { private static MurmurHash _instance = new MurmurHash(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash3.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash3.java index 89014dbdd00..78d1331fc61 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash3.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/MurmurHash3.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; *

    MurmurHash3 is the successor to MurmurHash2. It comes in 3 variants, and * the 32-bit version targets low latency for hash table use.

    */ -@InterfaceAudience.Public +@InterfaceAudience.Private @InterfaceStability.Stable public class MurmurHash3 extends Hash { private static MurmurHash3 _instance = new MurmurHash3(); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java index 7f3838f435e..678cc7bf0c1 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/PrettyPrinter.java @@ -36,7 +36,7 @@ public class PrettyPrinter { StringBuilder human = new StringBuilder(); switch (unit) { case TIME_INTERVAL: - human.append(humanReadableTTL(Long.valueOf(value))); + human.append(humanReadableTTL(Long.parseLong(value))); break; case LONG: byte[] longBytes = Bytes.toBytesBinary(value); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java index c88cae335f6..73512fa21bb 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java @@ -152,7 +152,9 @@ public class RetryCounter { public void sleepUntilNextRetry() throws InterruptedException { int attempts = getAttemptTimes(); long sleepTime = retryConfig.backoffPolicy.getBackoffTime(retryConfig, attempts); - LOG.info("Sleeping " + sleepTime + "ms before retry #" + attempts + "..."); + if (LOG.isTraceEnabled()) { + LOG.trace("Sleeping " + sleepTime + "ms before retry #" + attempts + "..."); + } retryConfig.getTimeUnit().sleep(sleepTime); useRetry(); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java index 81178c4e6bc..51a506bc2d7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java @@ -35,6 +35,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; +import com.google.common.base.Preconditions; + /** * Thread Utility */ @@ -266,8 +268,66 @@ public class Threads { t.setUncaughtExceptionHandler(LOGGING_EXCEPTION_HANDLER); } - private static Method printThreadInfoMethod = null; - private static boolean printThreadInfoMethodWithPrintStream = true; + private static interface PrintThreadInfoHelper { + + void printThreadInfo(PrintStream stream, String title); + + } + + private static class PrintThreadInfoLazyHolder { + + public static final PrintThreadInfoHelper HELPER = initHelper(); + + private static PrintThreadInfoHelper initHelper() { + Method method = null; + try { + // Hadoop 2.7+ declares printThreadInfo(PrintStream, String) + method = ReflectionUtils.class.getMethod("printThreadInfo", PrintStream.class, + String.class); + method.setAccessible(true); + final Method hadoop27Method = method; + return new PrintThreadInfoHelper() { + + @Override + public void printThreadInfo(PrintStream stream, String title) { + try { + hadoop27Method.invoke(null, stream, title); + } catch (IllegalAccessException | IllegalArgumentException e) { + throw new RuntimeException(e); + } catch (InvocationTargetException e) { + throw new RuntimeException(e.getCause()); + } + } + }; + } catch (NoSuchMethodException e) { + LOG.info( + "Can not find hadoop 2.7+ printThreadInfo method, try hadoop 
hadoop 2.6 and earlier", e); + } + try { + // Hadoop 2.6 and earlier declares printThreadInfo(PrintWriter, String) + method = ReflectionUtils.class.getMethod("printThreadInfo", PrintWriter.class, + String.class); + method.setAccessible(true); + final Method hadoop26Method = method; + return new PrintThreadInfoHelper() { + + @Override + public void printThreadInfo(PrintStream stream, String title) { + try { + hadoop26Method.invoke(null, new PrintWriter(stream), title); + } catch (IllegalAccessException | IllegalArgumentException e) { + throw new RuntimeException(e); + } catch (InvocationTargetException e) { + throw new RuntimeException(e.getCause()); + } + } + }; + } catch (NoSuchMethodException e) { + LOG.warn("Cannot find printThreadInfo method. Check hadoop jars linked", e); + } + return null; + } + } /** * Print all of the thread's information and stack traces. Wrapper around Hadoop's method. @@ -276,33 +336,7 @@ public class Threads { * @param title a string title for the stack trace */ public static void printThreadInfo(PrintStream stream, String title) { - - if (printThreadInfoMethod == null) { - try { - // Hadoop 2.7+ declares printThreadInfo(PrintStream, String) - printThreadInfoMethod = ReflectionUtils.class.getMethod("printThreadInfo", - PrintStream.class, String.class); - } catch (NoSuchMethodException e) { - // Hadoop 2.6 and earlier declares printThreadInfo(PrintWriter, String) - printThreadInfoMethodWithPrintStream = false; - try { - printThreadInfoMethod = ReflectionUtils.class.getMethod("printThreadInfo", - PrintWriter.class, String.class); - } catch (NoSuchMethodException e1) { - throw new RuntimeException("Cannot find method. Check hadoop jars linked", e1); - } - } - printThreadInfoMethod.setAccessible(true); - } - - try { - if (printThreadInfoMethodWithPrintStream) { - printThreadInfoMethod.invoke(null, stream, title); - } else { - printThreadInfoMethod.invoke(null, new PrintWriter(stream), title); - } - } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) { - throw new RuntimeException(e.getCause()); - } + Preconditions.checkNotNull(PrintThreadInfoLazyHolder.HELPER, + "Cannot find method. Check hadoop jars linked").printThreadInfo(stream, title); } } diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml index 52b373e1c9f..9931b2d2cd5 100644 --- a/hbase-common/src/main/resources/hbase-default.xml +++ b/hbase-common/src/main/resources/hbase-default.xml @@ -96,6 +96,11 @@ possible configurations would overwhelm and obscure the important. + + hbase.master.port + 16000 + The port the HBase Master should bind to. + hbase.master.info.port 16010 @@ -239,14 +244,6 @@ possible configurations would overwhelm and obscure the important. Interval between messages from the RegionServer to Master in milliseconds. - - hbase.regionserver.regionSplitLimit - 2147483647 - Limit for the number of regions after which no more region - splitting should take place. This is not a hard limit for the number of - regions but acts as a guideline for the regionserver to stop splitting after - a certain limit. Default is MAX_INT; i.e. do not block splitting. - hbase.regionserver.logroll.period 3600000 @@ -333,6 +330,15 @@ possible configurations would overwhelm and obscure the important. DelimitedKeyPrefixRegionSplitPolicy, KeyPrefixRegionSplitPolicy etc. + + hbase.regionserver.regionSplitLimit + 1000 + + Limit for the number of regions after which no more region splitting should take place. 
+ This is not hard limit for the number of regions but acts as a guideline for the regionserver + to stop splitting after a certain limit. Default is set to 1000. + + @@ -518,13 +524,19 @@ possible configurations would overwhelm and obscure the important. hbase.client.scanner.caching - 100 - Number of rows that will be fetched when calling next - on a scanner if it is not served from (local, client) memory. Higher - caching values will enable faster scanners but will eat up more memory - and some calls of next may take longer and longer times when the cache is empty. - Do not set this value such that the time between invocations is greater - than the scanner timeout; i.e. hbase.client.scanner.timeout.period + 2147483647 + Number of rows that we try to fetch when calling next + on a scanner if it is not served from (local, client) memory. This configuration + works together with hbase.client.scanner.max.result.size to try and use the + network efficiently. The default value is Integer.MAX_VALUE by default so that + the network will fill the chunk size defined by hbase.client.scanner.max.result.size + rather than be limited by a particular number of rows since the size of rows varies + table to table. If you know ahead of time that you will not require more than a certain + number of rows from a scan, this configuration should be set to that row limit via + Scan#setCaching. Higher caching values will enable faster scanners but will eat up more + memory and some calls of next may take longer and longer times when the cache is empty. + Do not set this value such that the time between invocations is greater than the scanner + timeout; i.e. hbase.client.scanner.timeout.period hbase.client.keyvalue.maxsize @@ -906,6 +918,13 @@ possible configurations would overwhelm and obscure the important. Set no delay on rpc socket connections. See http://docs.oracle.com/javase/1.5.0/docs/api/java/net/Socket.html#getTcpNoDelay() + + hbase.regionserver.hostname + + This config is for experts: don't set its value unless you really know what you are doing. + When set to a non-empty value, this represents the (external facing) hostname for the underlying server. + See https://issues.apache.org/jira/browse/HBASE-12954 for details. + @@ -984,6 +1003,22 @@ possible configurations would overwhelm and obscure the important. as part of the table details, region names, etc. When this is set to false, the keys are hidden. + + hbase.coprocessor.enabled + true + Enables or disables coprocessor loading. If 'false' + (disabled), any other coprocessor related configuration will be ignored. + + + + hbase.coprocessor.user.enabled + true + Enables or disables user (aka. table) coprocessor loading. + If 'false' (disabled), any table coprocessor attributes in table + descriptors will be ignored. If "hbase.coprocessor.enabled" is 'false' + this setting has no effect. + + hbase.coprocessor.region.classes @@ -1128,7 +1163,7 @@ possible configurations would overwhelm and obscure the important. hbase.regionserver.thrift.framed.max_frame_size_in_mb 2 - Default frame size when using framed transport + Default frame size when using framed transport, in MB hbase.regionserver.thrift.compact @@ -1268,6 +1303,16 @@ possible configurations would overwhelm and obscure the important. + + hbase.server.scanner.max.result.size + 104857600 + Maximum number of bytes returned when calling a scanner's next method. + Note that when a single row is larger than this limit the row is still returned completely. + The default value is 100MB. 
+ This is a safety setting to protect the server from OOM situations. + + + hbase.status.published false diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java index 811d4d9564c..3529e9403b8 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestChoreService.java @@ -423,7 +423,7 @@ public class TestChoreService { shutdownService(service); } - @Test (timeout=20000) + @Test(timeout = 30000) public void testCorePoolDecrease() throws InterruptedException { final int initialCorePoolSize = 3; ChoreService service = new ChoreService("testCorePoolDecrease", initialCorePoolSize); @@ -456,6 +456,8 @@ public class TestChoreService { service.getNumberOfScheduledChores(), service.getCorePoolSize()); assertEquals(service.getNumberOfChoresMissingStartTime(), 5); + // Now we begin to cancel the chores that caused an increase in the core thread pool of the + // ChoreService. These cancellations should cause a decrease in the core thread pool. slowChore5.cancel(); Thread.sleep(chorePeriod * 10); assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), @@ -486,44 +488,6 @@ public class TestChoreService { service.getCorePoolSize()); assertEquals(service.getNumberOfChoresMissingStartTime(), 0); - slowChore1.resetState(); - service.scheduleChore(slowChore1); - Thread.sleep(chorePeriod * 10); - assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), - service.getCorePoolSize()); - assertEquals(service.getNumberOfChoresMissingStartTime(), 1); - - slowChore2.resetState(); - service.scheduleChore(slowChore2); - Thread.sleep(chorePeriod * 10); - assertEquals(Math.max(ChoreService.MIN_CORE_POOL_SIZE, service.getNumberOfScheduledChores()), - service.getCorePoolSize()); - assertEquals(service.getNumberOfChoresMissingStartTime(), 2); - - DoNothingChore fastChore1 = new DoNothingChore("fastChore1", chorePeriod); - service.scheduleChore(fastChore1); - Thread.sleep(chorePeriod * 10); - assertEquals(service.getNumberOfChoresMissingStartTime(), 2); - assertEquals("Should increase", 3, service.getCorePoolSize()); - - DoNothingChore fastChore2 = new DoNothingChore("fastChore2", chorePeriod); - service.scheduleChore(fastChore2); - Thread.sleep(chorePeriod * 10); - assertEquals(service.getNumberOfChoresMissingStartTime(), 2); - assertEquals("Should increase", 3, service.getCorePoolSize()); - - DoNothingChore fastChore3 = new DoNothingChore("fastChore3", chorePeriod); - service.scheduleChore(fastChore3); - Thread.sleep(chorePeriod * 10); - assertEquals(service.getNumberOfChoresMissingStartTime(), 2); - assertEquals("Should not change", 3, service.getCorePoolSize()); - - DoNothingChore fastChore4 = new DoNothingChore("fastChore4", chorePeriod); - service.scheduleChore(fastChore4); - Thread.sleep(chorePeriod * 10); - assertEquals(service.getNumberOfChoresMissingStartTime(), 2); - assertEquals("Should not change", 3, service.getCorePoolSize()); - shutdownService(service); } @@ -657,35 +621,6 @@ public class TestChoreService { shutdownService(service); } - @Test (timeout=20000) - public void testScheduledChoreReset() throws InterruptedException { - final int period = 100; - ChoreService service = new ChoreService("testScheduledChoreReset"); - ScheduledChore chore = new DoNothingChore("sampleChore", period); - - // TRUE - assertTrue(!chore.isInitialChoreComplete()); 
- assertTrue(chore.getTimeOfLastRun() == -1); - assertTrue(chore.getTimeOfThisRun() == -1); - - service.scheduleChore(chore); - Thread.sleep(5 * period); - - // FALSE - assertFalse(!chore.isInitialChoreComplete()); - assertFalse(chore.getTimeOfLastRun() == -1); - assertFalse(chore.getTimeOfThisRun() == -1); - - chore.resetState(); - - // TRUE - assertTrue(!chore.isInitialChoreComplete()); - assertTrue(chore.getTimeOfLastRun() == -1); - assertTrue(chore.getTimeOfThisRun() == -1); - - shutdownService(service); - } - @Test (timeout=20000) public void testChangingChoreServices() throws InterruptedException { final int period = 100; diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java index 0b67b415601..e1de0c3ce47 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java @@ -18,6 +18,10 @@ */ package org.apache.hadoop.hbase; +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; import java.io.IOException; import java.util.Arrays; import java.util.Collections; @@ -35,6 +39,8 @@ import org.apache.hadoop.hbase.KeyValue.MetaComparator; import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.util.Bytes; +import static org.junit.Assert.assertNotEquals; + public class TestKeyValue extends TestCase { private final Log LOG = LogFactory.getLog(this.getClass().getName()); @@ -614,4 +620,233 @@ public class TestKeyValue extends TestCase { b = new KeyValue(Bytes.toBytes("table,111,222,bbb"), now); assertTrue(c.compare(a, b) < 0); } + + public void testEqualsAndHashCode() throws Exception { + KeyValue kvA1 = new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), + Bytes.toBytes("qualA"), Bytes.toBytes("1")); + KeyValue kvA2 = new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), + Bytes.toBytes("qualA"), Bytes.toBytes("2")); + // We set a different sequence id on kvA2 to demonstrate that the equals and hashCode also + // don't take this into account. 
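+    // (kvA1 and kvA2 already differ in value, so the asserts below also rely on equals/hashCode ignoring the value.)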
+ kvA2.setSequenceId(2); + KeyValue kvB = new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), + Bytes.toBytes("qualB"), Bytes.toBytes("1")); + + assertEquals(kvA1, kvA2); + assertNotEquals(kvA1, kvB); + assertEquals(kvA1.hashCode(), kvA2.hashCode()); + assertNotEquals(kvA1.hashCode(), kvB.hashCode()); + + } + + public void testKeyValueSerialization() throws Exception { + KeyValue kvA1 = new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes("qualA"), + Bytes.toBytes("1")); + KeyValue kvA2 = new KeyValue(Bytes.toBytes("key"), Bytes.toBytes("cf"), Bytes.toBytes("qualA"), + Bytes.toBytes("2")); + MockKeyValue mkvA1 = new MockKeyValue(kvA1); + MockKeyValue mkvA2 = new MockKeyValue(kvA2); + ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream(); + DataOutputStream os = new DataOutputStream(byteArrayOutputStream); + KeyValueUtil.oswrite(mkvA1, os, true); + KeyValueUtil.oswrite(mkvA2, os, true); + DataInputStream is = new DataInputStream(new ByteArrayInputStream( + byteArrayOutputStream.toByteArray())); + KeyValue deSerKV1 = KeyValue.iscreate(is); + assertTrue(kvA1.equals(deSerKV1)); + KeyValue deSerKV2 = KeyValue.iscreate(is); + assertTrue(kvA2.equals(deSerKV2)); + } + + private class MockKeyValue implements Cell { + private final KeyValue kv; + + public MockKeyValue(KeyValue kv) { + this.kv = kv; + } + + /** + * This returns the offset where the tag actually starts. + */ + @Override + public int getTagsOffset() { + return this.kv.getTagsOffset(); + } + + // used to achieve atomic operations in the memstore. + @Override + public long getMvccVersion() { + return this.kv.getMvccVersion(); + } + + /** + * used to achieve atomic operations in the memstore. + */ + @Override + public long getSequenceId() { + return this.kv.getSequenceId(); + } + + /** + * This returns the total length of the tag bytes + */ + @Override + public int getTagsLength() { + return this.kv.getTagsLength(); + } + + /** + * + * @return Timestamp + */ + @Override + public long getTimestamp() { + return this.kv.getTimestamp(); + } + + /** + * @return KeyValue.TYPE byte representation + */ + @Override + public byte getTypeByte() { + return this.kv.getTypeByte(); + } + + /** + * @return the backing array of the entire KeyValue (all KeyValue fields are + * in a single array) + */ + @Override + public byte[] getValueArray() { + return this.kv.getValueArray(); + } + + /** + * @return the value offset + */ + @Override + public int getValueOffset() { + return this.kv.getValueOffset(); + } + + /** + * @return Value length + */ + @Override + public int getValueLength() { + return this.kv.getValueLength(); + } + + /** + * @return the backing array of the entire KeyValue (all KeyValue fields are + * in a single array) + */ + @Override + public byte[] getRowArray() { + return this.kv.getRowArray(); + } + + /** + * @return Row offset + */ + @Override + public int getRowOffset() { + return this.kv.getRowOffset(); + } + + /** + * @return Row length + */ + @Override + public short getRowLength() { + return this.kv.getRowLength(); + } + + /** + * @return the backing array of the entire KeyValue (all KeyValue fields are + * in a single array) + */ + @Override + public byte[] getFamilyArray() { + return this.kv.getFamilyArray(); + } + + /** + * @return Family offset + */ + @Override + public int getFamilyOffset() { + return this.kv.getFamilyOffset(); + } + + /** + * @return Family length + */ + @Override + public byte getFamilyLength() { + return this.kv.getFamilyLength(); + } + + /** + * @return the backing 
array of the entire KeyValue (all KeyValue fields are + * in a single array) + */ + @Override + public byte[] getQualifierArray() { + return this.kv.getQualifierArray(); + } + + /** + * @return Qualifier offset + */ + @Override + public int getQualifierOffset() { + return this.kv.getQualifierOffset(); + } + + /** + * @return Qualifier length + */ + @Override + public int getQualifierLength() { + return this.kv.getQualifierLength(); + } + + @Override + @Deprecated + public byte[] getValue() { + // TODO Auto-generated method stub + return null; + } + + @Override + @Deprecated + public byte[] getFamily() { + // TODO Auto-generated method stub + return null; + } + + @Override + @Deprecated + public byte[] getQualifier() { + // TODO Auto-generated method stub + return null; + } + + @Override + @Deprecated + public byte[] getRow() { + // TODO Auto-generated method stub + return null; + } + + /** + * @return the backing array of the entire KeyValue (all KeyValue fields are + * in a single array) + */ + @Override + public byte[] getTagsArray() { + return this.kv.getTagsArray(); + } + } } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestBoundedByteBufferPool.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestBoundedByteBufferPool.java new file mode 100644 index 00000000000..0d452878a68 --- /dev/null +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/TestBoundedByteBufferPool.java @@ -0,0 +1,88 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.io; + +import static org.junit.Assert.assertEquals; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.hbase.testclassification.IOTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ IOTests.class, SmallTests.class }) +public class TestBoundedByteBufferPool { + final int maxByteBufferSizeToCache = 10; + final int initialByteBufferSize = 1; + final int maxToCache = 10; + BoundedByteBufferPool reservoir; + + @Before + public void before() { + this.reservoir = + new BoundedByteBufferPool(maxByteBufferSizeToCache, initialByteBufferSize, maxToCache); + } + + @After + public void after() { + this.reservoir = null; + } + + @Test + public void testEquivalence() { + ByteBuffer bb = ByteBuffer.allocate(1); + this.reservoir.putBuffer(bb); + this.reservoir.putBuffer(bb); + this.reservoir.putBuffer(bb); + assertEquals(3, this.reservoir.buffers.size()); + } + + @Test + public void testGetPut() { + ByteBuffer bb = this.reservoir.getBuffer(); + assertEquals(initialByteBufferSize, bb.capacity()); + assertEquals(0, this.reservoir.buffers.size()); + this.reservoir.putBuffer(bb); + assertEquals(1, this.reservoir.buffers.size()); + // Now remove a buffer and don't put it back so reservoir is empty. + this.reservoir.getBuffer(); + assertEquals(0, this.reservoir.buffers.size()); + // Try adding in a buffer with a bigger-than-initial size and see if our runningAverage works. + // Need to add then remove, then get a new bytebuffer so reservoir internally is doing + // allocation + final int newCapacity = 2; + this.reservoir.putBuffer(ByteBuffer.allocate(newCapacity)); + assertEquals(1, reservoir.buffers.size()); + this.reservoir.getBuffer(); + assertEquals(0, this.reservoir.buffers.size()); + bb = this.reservoir.getBuffer(); + assertEquals(newCapacity, bb.capacity()); + // Assert that adding a too-big buffer won't happen + assertEquals(0, this.reservoir.buffers.size()); + this.reservoir.putBuffer(ByteBuffer.allocate(maxByteBufferSizeToCache * 2)); + assertEquals(0, this.reservoir.buffers.size()); + // Assert we can't add more than max allowed instances. 
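+    // Fill the reservoir up to maxToCache; the assert below expects the pool to cache at most that many buffers.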
+ for (int i = 0; i < maxToCache; i++) { + this.reservoir.putBuffer(ByteBuffer.allocate(initialByteBufferSize)); + } + assertEquals(maxToCache, this.reservoir.buffers.size()); + } +} diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestCipherProvider.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestCipherProvider.java index fdb9448c1df..dbf7fc5d26e 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestCipherProvider.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestCipherProvider.java @@ -142,11 +142,13 @@ public class TestCipherProvider { Configuration conf = HBaseConfiguration.create(); CipherProvider provider = Encryption.getCipherProvider(conf); assertTrue(provider instanceof DefaultCipherProvider); - assertTrue(Arrays.asList(provider.getSupportedCiphers()).contains("AES")); - Cipher a = Encryption.getCipher(conf, "AES"); + String algorithm = + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + assertTrue(Arrays.asList(provider.getSupportedCiphers()).contains(algorithm)); + Cipher a = Encryption.getCipher(conf, algorithm); assertNotNull(a); assertTrue(a.getProvider() instanceof DefaultCipherProvider); - assertEquals(a.getName(), "AES"); + assertEquals(a.getName(), algorithm); assertEquals(a.getKeyLength(), AES.KEY_LENGTH); } diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestEncryption.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestEncryption.java index d36333efef2..0d38356010b 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestEncryption.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/TestEncryption.java @@ -29,6 +29,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -89,8 +90,10 @@ public class TestEncryption { LOG.info("checkTransformSymmetry: AES, plaintext length = " + plaintext.length); Configuration conf = HBaseConfiguration.create(); - Cipher aes = Encryption.getCipher(conf, "AES"); - Key key = new SecretKeySpec(keyBytes, "AES"); + String algorithm = + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + Cipher aes = Encryption.getCipher(conf, algorithm); + Key key = new SecretKeySpec(keyBytes, algorithm); Encryptor e = aes.getEncryptor(); e.setKey(key); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java index d948a2bce61..eb5e4537838 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java @@ -31,6 +31,7 @@ import junit.framework.TestCase; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.io.WritableUtils; import org.junit.Assert; import org.junit.experimental.categories.Category; @@ -213,6 +214,19 @@ public class TestBytes extends TestCase { assertEquals(7, target.limit()); } + public void testReadAsVLong() throws Exception { + long [] longs = {-1l, 123l, Long.MIN_VALUE, 
Long.MAX_VALUE}; + for (int i = 0; i < longs.length; i++) { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream output = new DataOutputStream(baos); + WritableUtils.writeVLong(output, longs[i]); + byte[] long_bytes_no_offset = baos.toByteArray(); + assertEquals(longs[i], Bytes.readAsVLong(long_bytes_no_offset, 0)); + byte[] long_bytes_with_offset = bytesWithOffset(long_bytes_no_offset); + assertEquals(longs[i], Bytes.readAsVLong(long_bytes_with_offset, 1)); + } + } + public void testToStringBinaryForBytes() { byte[] array = { '0', '9', 'a', 'z', 'A', 'Z', '@', 1 }; String actual = Bytes.toStringBinary(array); diff --git a/hbase-common/src/test/resources/log4j.properties b/hbase-common/src/test/resources/log4j.properties index 6ee91efc3b2..13a95b4a673 100644 --- a/hbase-common/src/test/resources/log4j.properties +++ b/hbase-common/src/test/resources/log4j.properties @@ -63,4 +63,4 @@ log4j.logger.org.apache.hadoop.hbase=DEBUG log4j.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=ERROR log4j.org.apache.hadoop.metrics2.util.MBeans=ERROR # Enable this to get detailed connection error/retry logging. -# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE +# log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=TRACE diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml index a149f521db8..80bd9c3de36 100644 --- a/hbase-examples/pom.xml +++ b/hbase-examples/pom.xml @@ -31,27 +31,6 @@ Examples of HBase usage - - maven-compiler-plugin - - - default-compile - - ${java.default.compiler} - true - false - - - - default-testCompile - - ${java.default.compiler} - true - false - - - - org.apache.maven.plugins maven-site-plugin @@ -82,6 +61,49 @@ maven-source-plugin + + + + + org.eclipse.m2e + lifecycle-mapping + 1.0.0 + + + + + + org.apache.maven.plugins + maven-dependency-plugin + [2.8,) + + build-classpath + + + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + [3.2,) + + compile + + + + + + + + + + + + diff --git a/hbase-examples/src/main/asciidoc/.gitignore b/hbase-examples/src/main/asciidoc/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java index 0b65341b2ec..93f98acdfac 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/BulkDeleteEndpoint.java @@ -26,11 +26,11 @@ import java.util.TreeSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Mutation; @@ -39,15 +39,15 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorException; import org.apache.hadoop.hbase.coprocessor.CoprocessorService; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest; -import 
org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse; -import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteService; import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; +import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse; import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteResponse.Builder; +import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteService; import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.ResponseConverter; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.OperationStatus; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.util.Bytes; @@ -112,7 +112,7 @@ public class BulkDeleteEndpoint extends BulkDeleteService implements Coprocessor RpcCallback done) { long totalRowsDeleted = 0L; long totalVersionsDeleted = 0L; - HRegion region = env.getRegion(); + Region region = env.getRegion(); int rowBatchSize = request.getRowBatchSize(); Long timestamp = null; if (request.hasTimestamp()) { @@ -151,7 +151,8 @@ public class BulkDeleteEndpoint extends BulkDeleteService implements Coprocessor for (List deleteRow : deleteRows) { deleteArr[i++] = createDeleteMutation(deleteRow, deleteType, timestamp); } - OperationStatus[] opStatus = region.batchMutate(deleteArr); + OperationStatus[] opStatus = region.batchMutate(deleteArr, HConstants.NO_NONCE, + HConstants.NO_NONCE); for (i = 0; i < opStatus.length; i++) { if (opStatus[i].getOperationStatusCode() != OperationStatusCode.SUCCESS) { break; diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java index 9ef1bd28df9..df18fed8539 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java @@ -18,7 +18,6 @@ */ package org.apache.hadoop.hbase.thrift; -import sun.misc.BASE64Encoder; import java.io.UnsupportedEncodingException; import java.nio.ByteBuffer; @@ -43,6 +42,7 @@ import org.apache.hadoop.hbase.thrift.generated.ColumnDescriptor; import org.apache.hadoop.hbase.thrift.generated.Hbase; import org.apache.hadoop.hbase.thrift.generated.TCell; import org.apache.hadoop.hbase.thrift.generated.TRowResult; +import org.apache.hadoop.hbase.util.Base64; import org.apache.thrift.protocol.TBinaryProtocol; import org.apache.thrift.protocol.TProtocol; import org.apache.thrift.transport.THttpClient; @@ -64,21 +64,24 @@ public class HttpDoAsClient { static protected String host; CharsetDecoder decoder = null; private static boolean secure = false; + static protected String doAsUser = null; + static protected String principal = null; public static void main(String[] args) throws Exception { - if (args.length < 2 || args.length > 3) { + if (args.length < 3 || args.length > 4) { System.out.println("Invalid arguments!"); - System.out.println("Usage: DemoClient host port [secure=false]"); - + System.out.println("Usage: HttpDoAsClient host port doAsUserName [security=true]"); System.exit(-1); } - port = Integer.parseInt(args[1]); host = args[0]; - if 
(args.length > 2) { - secure = Boolean.parseBoolean(args[2]); + port = Integer.parseInt(args[1]); + doAsUser = args[2]; + if (args.length > 3) { + secure = Boolean.parseBoolean(args[3]); + principal = getSubject().getPrincipals().iterator().next().getName(); } final HttpDoAsClient client = new HttpDoAsClient(); @@ -134,7 +137,7 @@ public class HttpDoAsClient { for (ByteBuffer name : refresh(client, httpClient).getTableNames()) { System.out.println(" found: " + utf8(name.array())); if (utf8(name.array()).equals(utf8(t))) { - if (client.isTableEnabled(name)) { + if (refresh(client, httpClient).isTableEnabled(name)) { System.out.println(" disabling table: " + utf8(name.array())); refresh(client, httpClient).disableTable(name); } @@ -180,8 +183,8 @@ public class HttpDoAsClient { } private Hbase.Client refresh(Hbase.Client client, THttpClient httpClient) { + httpClient.setCustomHeader("doAs", doAsUser); if(secure) { - httpClient.setCustomHeader("doAs", "hbase"); try { httpClient.setCustomHeader("Authorization", generateTicket()); } catch (GSSException e) { @@ -196,14 +199,14 @@ public class HttpDoAsClient { // Oid for kerberos principal name Oid krb5PrincipalOid = new Oid("1.2.840.113554.1.2.2.1"); Oid KERB_V5_OID = new Oid("1.2.840.113554.1.2.2"); - final GSSName clientName = manager.createName("hbase/node-1.internal@INTERNAL", + final GSSName clientName = manager.createName(principal, krb5PrincipalOid); final GSSCredential clientCred = manager.createCredential(clientName, 8 * 3600, KERB_V5_OID, GSSCredential.INITIATE_ONLY); - final GSSName serverName = manager.createName("hbase/node-1.internal@INTERNAL", krb5PrincipalOid); + final GSSName serverName = manager.createName(principal, krb5PrincipalOid); final GSSContext context = manager.createContext(serverName, KERB_V5_OID, @@ -216,7 +219,7 @@ public class HttpDoAsClient { final byte[] outToken = context.initSecContext(new byte[0], 0, 0); StringBuffer outputBuffer = new StringBuffer(); outputBuffer.append("Negotiate "); - outputBuffer.append(new BASE64Encoder().encode(outToken).replace("\n", "")); + outputBuffer.append(Base64.encodeBytes(outToken).replace("\n", "")); System.out.print("Ticket is: " + outputBuffer); return outputBuffer.toString(); } diff --git a/hbase-hadoop-compat/pom.xml b/hbase-hadoop-compat/pom.xml index 0c3c2bfff7d..63dedf7f18b 100644 --- a/hbase-hadoop-compat/pom.xml +++ b/hbase-hadoop-compat/pom.xml @@ -36,27 +36,6 @@ - - maven-compiler-plugin - - - default-compile - - ${java.default.compiler} - true - false - - - - default-testCompile - - ${java.default.compiler} - true - false - - - - org.apache.maven.plugins maven-site-plugin @@ -95,6 +74,36 @@ maven-source-plugin + + + + + org.eclipse.m2e + lifecycle-mapping + 1.0.0 + + + + + + org.apache.maven.plugins + maven-compiler-plugin + [3.2,) + + compile + + + + + + + + + + + + diff --git a/hbase-hadoop-compat/src/main/asciidoc/.gitignore b/hbase-hadoop-compat/src/main/asciidoc/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java index 25f0c407002..1f4c950e441 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java @@ -42,6 +42,8 @@ public interface MetricsHBaseServerSource extends BaseSource { String QUEUE_CALL_TIME_DESC = 
"Queue Call Time."; String PROCESS_CALL_TIME_NAME = "processCallTime"; String PROCESS_CALL_TIME_DESC = "Processing call time."; + String TOTAL_CALL_TIME_NAME = "totalCallTime"; + String TOTAL_CALL_TIME_DESC = "Total call time, including both queued and processing time."; String QUEUE_SIZE_NAME = "queueSize"; String QUEUE_SIZE_DESC = "Number of bytes in the call queues."; String GENERAL_QUEUE_NAME = "numCallsInGeneralQueue"; @@ -71,4 +73,6 @@ public interface MetricsHBaseServerSource extends BaseSource { void dequeuedCall(int qTime); void processedCall(int processingTime); -} + + void queuedAndProcessedCall(int totalTime); + } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java index 90a9a090443..268d4afcb7d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java @@ -87,6 +87,13 @@ public interface MetricsRegionServerSource extends BaseSource { */ void updateReplay(long t); + /** + * Update the scan size. + * + * @param scanSize size of the scan + */ + void updateScannerNext(long scanSize); + /** * Increment the number of slow Puts that have happened. */ @@ -180,6 +187,9 @@ public interface MetricsRegionServerSource extends BaseSource { String PERCENT_FILES_LOCAL = "percentFilesLocal"; String PERCENT_FILES_LOCAL_DESC = "The percent of HFiles that are stored on the local hdfs data node."; + String PERCENT_FILES_LOCAL_SECONDARY_REGIONS = "percentFilesLocalSecondaryRegions"; + String PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC = + "The percent of HFiles used by secondary regions that are stored on the local hdfs data node."; String SPLIT_QUEUE_LENGTH = "splitQueueLength"; String SPLIT_QUEUE_LENGTH_DESC = "Length of the queue for splits."; String COMPACTION_QUEUE_LENGTH = "compactionQueueLength"; @@ -303,7 +313,7 @@ public interface MetricsRegionServerSource extends BaseSource { String SPLIT_KEY = "splitTime"; String SPLIT_REQUEST_KEY = "splitRequestCount"; String SPLIT_REQUEST_DESC = "Number of splits requested"; - String SPLIT_SUCCESS_KEY = "splitSuccessCounnt"; + String SPLIT_SUCCESS_KEY = "splitSuccessCount"; String SPLIT_SUCCESS_DESC = "Number of successfully executed splits"; String FLUSH_KEY = "flushTime"; } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java index 2aad1158a27..b609b4ade9a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java @@ -153,6 +153,11 @@ public interface MetricsRegionServerWrapper { */ int getPercentFileLocal(); + /** + * Get the percent of HFiles' that are local for secondary region replicas. + */ + int getPercentFileLocalSecondaryRegions(); + /** * Get the size of the split queue */ @@ -253,7 +258,6 @@ public interface MetricsRegionServerWrapper { long getMajorCompactedCellsSize(); /** -<<<<<<< HEAD * Gets the number of cells move to mob during compaction. 
*/ long getMobCompactedIntoMobCellsCount(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java index 805dfcaf537..698a59a2acb 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java @@ -26,4 +26,5 @@ public interface MetricsReplicationSinkSource { void setLastAppliedOpAge(long age); void incrAppliedBatches(long batches); void incrAppliedOps(long batchsize); + long getLastAppliedOpAge(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java index 66d265a90ba..fecf191a063 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java @@ -43,4 +43,5 @@ public interface MetricsReplicationSourceSource { void incrLogReadInBytes(long size); void incrLogReadInEdits(long size); void clear(); + long getLastShippedAge(); } diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml index e88b85487b4..52472b3fa18 100644 --- a/hbase-hadoop2-compat/pom.xml +++ b/hbase-hadoop2-compat/pom.xml @@ -34,27 +34,6 @@ limitations under the License. - - maven-compiler-plugin - - - default-compile - - ${java.default.compiler} - true - false - - - - default-testCompile - - ${java.default.compiler} - true - false - - - - org.apache.maven.plugins maven-site-plugin @@ -116,9 +95,9 @@ limitations under the License. - + org.eclipse.m2e lifecycle-mapping @@ -139,6 +118,19 @@ limitations under the License. 
+ + + org.apache.maven.plugins + maven-compiler-plugin + [3.2,) + + compile + + + + + + diff --git a/hbase-hadoop2-compat/src/main/asciidoc/.gitignore b/hbase-hadoop2-compat/src/main/asciidoc/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java index 2f5e5cf289e..8eefb082039 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java @@ -40,6 +40,7 @@ public class MetricsHBaseServerSourceImpl extends BaseSourceImpl private final MutableCounterLong receivedBytes; private MutableHistogram queueCallTime; private MutableHistogram processCallTime; + private MutableHistogram totalCallTime; public MetricsHBaseServerSourceImpl(String metricsName, String metricsDescription, @@ -66,6 +67,8 @@ public class MetricsHBaseServerSourceImpl extends BaseSourceImpl QUEUE_CALL_TIME_DESC); this.processCallTime = this.getMetricsRegistry().newHistogram(PROCESS_CALL_TIME_NAME, PROCESS_CALL_TIME_DESC); + this.totalCallTime = this.getMetricsRegistry().newHistogram(TOTAL_CALL_TIME_NAME, + TOTAL_CALL_TIME_DESC); } @Override @@ -108,10 +111,14 @@ public class MetricsHBaseServerSourceImpl extends BaseSourceImpl processCallTime.add(processingTime); } + @Override + public void queuedAndProcessedCall(int totalTime) { + totalCallTime.add(totalTime); + } + @Override public void getMetrics(MetricsCollector metricsCollector, boolean all) { - MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName) - .setContext(metricsContext); + MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName); if (wrapper != null) { mrb.addGauge(Interns.info(QUEUE_SIZE_NAME, QUEUE_SIZE_DESC), wrapper.getTotalQueueSize()) diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java index d4c90dcb88e..02463f618c5 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java @@ -68,8 +68,7 @@ public class MetricsMasterSourceImpl @Override public void getMetrics(MetricsCollector metricsCollector, boolean all) { - MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName) - .setContext(metricsContext); + MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName); // masterWrapper can be null because this function is called inside of init. 
if (masterWrapper != null) { diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java index 5cb2cec4141..ab7255e8f03 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java @@ -80,8 +80,7 @@ public class MetricsRegionAggregateSourceImpl extends BaseSourceImpl public void getMetrics(MetricsCollector collector, boolean all) { - MetricsRecordBuilder mrb = collector.addRecord(metricsName) - .setContext(metricsContext); + MetricsRecordBuilder mrb = collector.addRecord(metricsName); if (regionSources != null) { lock.readLock().lock(); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java index 45e26992d80..cadb5746513 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java @@ -42,6 +42,7 @@ public class MetricsRegionServerSourceImpl private final MetricHistogram incrementHisto; private final MetricHistogram appendHisto; private final MetricHistogram replayHisto; + private final MetricHistogram scanNextHisto; private final MutableCounterLong slowPut; private final MutableCounterLong slowDelete; @@ -76,12 +77,13 @@ public class MetricsRegionServerSourceImpl slowGet = getMetricsRegistry().newCounter(SLOW_GET_KEY, SLOW_GET_DESC, 0l); incrementHisto = getMetricsRegistry().newHistogram(INCREMENT_KEY); - slowIncrement = getMetricsRegistry().newCounter(SLOW_INCREMENT_KEY, SLOW_INCREMENT_DESC, 0l); + slowIncrement = getMetricsRegistry().newCounter(SLOW_INCREMENT_KEY, SLOW_INCREMENT_DESC, 0L); appendHisto = getMetricsRegistry().newHistogram(APPEND_KEY); slowAppend = getMetricsRegistry().newCounter(SLOW_APPEND_KEY, SLOW_APPEND_DESC, 0l); replayHisto = getMetricsRegistry().newHistogram(REPLAY_KEY); + scanNextHisto = getMetricsRegistry().newHistogram(SCAN_NEXT_KEY); splitTimeHisto = getMetricsRegistry().newHistogram(SPLIT_KEY); flushTimeHisto = getMetricsRegistry().newHistogram(FLUSH_KEY); @@ -120,6 +122,11 @@ public class MetricsRegionServerSourceImpl replayHisto.add(t); } + @Override + public void updateScannerNext(long scanSize) { + scanNextHisto.add(scanSize); + } + @Override public void incrSlowPut() { slowPut.incr(); @@ -176,8 +183,7 @@ public class MetricsRegionServerSourceImpl @Override public void getMetrics(MetricsCollector metricsCollector, boolean all) { - MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName) - .setContext(metricsContext); + MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName); // rsWrap can be null because this function is called inside of init. 
if (rsWrap != null) { @@ -213,6 +219,9 @@ public class MetricsRegionServerSourceImpl rsWrap.getDataInMemoryWithoutWAL()) .addGauge(Interns.info(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC), rsWrap.getPercentFileLocal()) + .addGauge(Interns.info(PERCENT_FILES_LOCAL_SECONDARY_REGIONS, + PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC), + rsWrap.getPercentFileLocalSecondaryRegions()) .addGauge(Interns.info(SPLIT_QUEUE_LENGTH, SPLIT_QUEUE_LENGTH_DESC), rsWrap.getSplitQueueSize()) .addGauge(Interns.info(COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC), diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java index a210171577c..6dace107f99 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java @@ -95,4 +95,9 @@ public class MetricsReplicationGlobalSourceSource implements MetricsReplicationS @Override public void clear() { } + + @Override + public long getLastShippedAge() { + return ageOfLastShippedOpGauge.value(); + } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java index 3025e3e7724..14212ba0869 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java @@ -44,4 +44,9 @@ public class MetricsReplicationSinkSourceImpl implements MetricsReplicationSinkS @Override public void incrAppliedOps(long batchsize) { opsCounter.incr(batchsize); } + + @Override + public long getLastAppliedOpAge() { + return ageGauge.value(); + } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java index 89ef4de920a..1422e7e1cd3 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java @@ -125,4 +125,9 @@ public class MetricsReplicationSourceSourceImpl implements MetricsReplicationSou rms.removeMetric(logEditsFilteredKey); } + + @Override + public long getLastShippedAge() { + return ageOfLastShippedOpGauge.value(); + } } diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml index 4522f9c5872..30bdf13a884 100644 --- a/hbase-it/pom.xml +++ b/hbase-it/pom.xml @@ -115,29 +115,49 @@ + + + org.eclipse.m2e + lifecycle-mapping + 1.0.0 + + + + + + org.apache.maven.plugins + maven-dependency-plugin + [2.8,) + + build-classpath + + + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + [3.2,) + + compile + + + + + + + + + + - - maven-compiler-plugin - - - default-compile - - ${java.default.compiler} - true - - - - default-testCompile - - ${java.default.compiler} - true - - - - 
org.apache.maven.plugins @@ -202,6 +222,11 @@ com.google.guava guava + + com.sun.jersey + jersey-client + ${jersey.version} + com.yammer.metrics metrics-core diff --git a/hbase-it/src/main/asciidoc/.gitignore b/hbase-it/src/main/asciidoc/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java index f45fb048314..097673a22cf 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBase.java @@ -71,8 +71,14 @@ public abstract class IntegrationTestBase extends AbstractHBaseTool { + "monkey properties."); } - @Override - protected void processOptions(CommandLine cmd) { + /** + * This allows tests that subclass children of this base class such as + * {@link org.apache.hadoop.hbase.test.IntegrationTestReplication} to + * include the base options without having to also include the options from the test. + * + * @param cmd the command line + */ + protected void processBaseOptions(CommandLine cmd) { if (cmd.hasOption(MONKEY_LONG_OPT)) { monkeyToUse = cmd.getOptionValue(MONKEY_LONG_OPT); } @@ -94,6 +100,11 @@ public abstract class IntegrationTestBase extends AbstractHBaseTool { } } + @Override + protected void processOptions(CommandLine cmd) { + processBaseOptions(cmd); + } + @Override public Configuration getConf() { Configuration c = super.getConf(); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java index c0c54b7879b..8495889a3a7 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.testclassification.IntegrationTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.LoadTestTool; import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; import org.junit.Assert; import org.junit.Test; @@ -45,8 +46,8 @@ import com.google.common.collect.Sets; public class IntegrationTestIngest extends IntegrationTestBase { public static final char HIPHEN = '-'; private static final int SERVER_COUNT = 1; // number of slaves for the smallest cluster - private static final long DEFAULT_RUN_TIME = 20 * 60 * 1000; - private static final long JUNIT_RUN_TIME = 10 * 60 * 1000; + protected static final long DEFAULT_RUN_TIME = 20 * 60 * 1000; + protected static final long JUNIT_RUN_TIME = 10 * 60 * 1000; /** A soft limit on how long we should run */ protected static final String RUN_TIME_KEY = "hbase.%s.runtime"; @@ -66,6 +67,7 @@ public class IntegrationTestIngest extends IntegrationTestBase { protected LoadTestTool loadTool; protected String[] LOAD_TEST_TOOL_INIT_ARGS = { + LoadTestTool.OPT_COLUMN_FAMILIES, LoadTestTool.OPT_COMPRESSION, LoadTestTool.OPT_DATA_BLOCK_ENCODING, LoadTestTool.OPT_INMEMORY, @@ -78,7 +80,7 @@ public class IntegrationTestIngest extends IntegrationTestBase { public void setUpCluster() throws Exception { util = getTestingUtil(getConf()); LOG.debug("Initializing/checking cluster has " + SERVER_COUNT + " servers"); - util.initializeCluster(SERVER_COUNT); + util.initializeCluster(getMinServerCount()); LOG.debug("Done initializing/checking cluster"); cluster = 
util.getHBaseClusterInterface(); deleteTableIfNecessary(); @@ -89,6 +91,10 @@ public class IntegrationTestIngest extends IntegrationTestBase { initTable(); } + protected int getMinServerCount() { + return SERVER_COUNT; + } + protected void initTable() throws IOException { int ret = loadTool.run(getArgsForLoadTestToolInitTable()); Assert.assertEquals("Failed to initialize LoadTestTool", 0, ret); @@ -125,7 +131,22 @@ public class IntegrationTestIngest extends IntegrationTestBase { @Override protected Set getColumnFamilies() { - return Sets.newHashSet(Bytes.toString(LoadTestTool.COLUMN_FAMILY)); + Set families = Sets.newHashSet(); + String clazz = this.getClass().getSimpleName(); + // parse conf for getting the column famly names because LTT is not initialized yet. + String familiesString = getConf().get( + String.format("%s.%s", clazz, LoadTestTool.OPT_COLUMN_FAMILIES)); + if (familiesString == null) { + for (byte[] family : LoadTestTool.DEFAULT_COLUMN_FAMILIES) { + families.add(Bytes.toString(family)); + } + } else { + for (String family : familiesString.split(",")) { + families.add(family); + } + } + + return families; } private void deleteTableIfNecessary() throws IOException { @@ -206,6 +227,8 @@ public class IntegrationTestIngest extends IntegrationTestBase { List args = new ArrayList(); args.add("-tn"); args.add(getTablename().getNameAsString()); + args.add("-families"); + args.add(getColumnFamiliesAsString()); args.add(mode); args.add(modeSpecificArg); args.add("-start_key"); @@ -217,6 +240,10 @@ public class IntegrationTestIngest extends IntegrationTestBase { return args.toArray(new String[args.size()]); } + private String getColumnFamiliesAsString() { + return StringUtils.join(",", getColumnFamilies()); + } + /** Estimates a data size based on the cluster size */ protected long getNumKeys(long keysPerServer) throws IOException { diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestStripeCompactions.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestStripeCompactions.java index ebf159e3594..d64fbb07952 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestStripeCompactions.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestStripeCompactions.java @@ -20,11 +20,13 @@ package org.apache.hadoop.hbase; import java.io.IOException; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.StoreEngine; import org.apache.hadoop.hbase.regionserver.StripeStoreEngine; import org.apache.hadoop.hbase.testclassification.IntegrationTests; import org.apache.hadoop.hbase.util.LoadTestTool; +import org.apache.hadoop.util.ToolRunner; import org.junit.experimental.categories.Category; /** @@ -39,7 +41,14 @@ public class IntegrationTestIngestStripeCompactions extends IntegrationTestInges HTableDescriptor htd = new HTableDescriptor(getTablename()); htd.setConfiguration(StoreEngine.STORE_ENGINE_CLASS_KEY, StripeStoreEngine.class.getName()); htd.setConfiguration(HStore.BLOCKING_STOREFILES_KEY, "100"); - HColumnDescriptor hcd = new HColumnDescriptor(LoadTestTool.COLUMN_FAMILY); + HColumnDescriptor hcd = new HColumnDescriptor(LoadTestTool.DEFAULT_COLUMN_FAMILY); HBaseTestingUtility.createPreSplitLoadTestTable(util.getConfiguration(), htd, hcd); } + + public static void main(String[] args) throws Exception { + Configuration conf = HBaseConfiguration.create(); + IntegrationTestingUtility.setUseDistributedCluster(conf); + int 
ret = ToolRunner.run(conf, new IntegrationTestIngestStripeCompactions(), args); + System.exit(ret); + } } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithEncryption.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithEncryption.java index ff8ed194d58..cd1b0b647f3 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithEncryption.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithEncryption.java @@ -24,8 +24,8 @@ import org.apache.hadoop.hbase.Waiter.Predicate; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting; import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.io.hfile.HFileReaderV3; -import org.apache.hadoop.hbase.io.hfile.HFileWriterV3; +import org.apache.hadoop.hbase.io.hfile.HFileReaderImpl; +import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.wal.WAL.Reader; import org.apache.hadoop.hbase.wal.WALProvider.Writer; import org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogReader; @@ -46,8 +46,8 @@ public class IntegrationTestIngestWithEncryption extends IntegrationTestIngest { static { // These log level changes are only useful when running on a localhost // cluster. - Logger.getLogger(HFileReaderV3.class).setLevel(Level.TRACE); - Logger.getLogger(HFileWriterV3.class).setLevel(Level.TRACE); + Logger.getLogger(HFileReaderImpl.class).setLevel(Level.TRACE); + Logger.getLogger(HFileWriterImpl.class).setLevel(Level.TRACE); Logger.getLogger(SecureProtobufLogReader.class).setLevel(Level.TRACE); Logger.getLogger(SecureProtobufLogWriter.class).setLevel(Level.TRACE); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java index 85c01ccae7f..82a599ce1bd 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java @@ -24,7 +24,8 @@ import java.util.List; import org.apache.commons.cli.CommandLine; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.testclassification.IntegrationTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.LoadTestDataGeneratorWithMOB; @@ -40,7 +41,7 @@ import org.junit.experimental.categories.Category; public class IntegrationTestIngestWithMOB extends IntegrationTestIngest { private static final char COLON = ':'; - private byte[] mobColumnFamily = LoadTestTool.COLUMN_FAMILY; + private byte[] mobColumnFamily = LoadTestTool.DEFAULT_COLUMN_FAMILY; public static final String THRESHOLD = "threshold"; public static final String MIN_MOB_DATA_SIZE = "minMobDataSize"; public static final String MAX_MOB_DATA_SIZE = "maxMobDataSize"; @@ -111,8 +112,8 @@ public class IntegrationTestIngestWithMOB extends IntegrationTestIngest { protected void initTable() throws IOException { super.initTable(); - byte[] tableName = getTablename().getName(); - HBaseAdmin admin = new HBaseAdmin(conf); + TableName tableName = getTablename(); + Admin admin = ConnectionFactory.createConnection().getAdmin(); HTableDescriptor tableDesc = admin.getTableDescriptor(tableName); LOG.info("Disabling table " + 
getTablename()); admin.disableTable(tableName); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java new file mode 100644 index 00000000000..33b2554cc71 --- /dev/null +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestRegionReplicaReplication.java @@ -0,0 +1,226 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.testclassification.IntegrationTests; +import org.apache.hadoop.hbase.util.ConstantDelayQueue; +import org.apache.hadoop.hbase.util.LoadTestTool; +import org.apache.hadoop.hbase.util.MultiThreadedUpdater; +import org.apache.hadoop.hbase.util.MultiThreadedWriter; +import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.util.test.LoadTestDataGenerator; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.ToolRunner; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +/** + * Integration test for testing async wal replication to secondary region replicas. Sets up a table + * with given region replication (default 2), and uses LoadTestTool client writer, updater and + * reader threads for writes and reads and verification. It uses a delay queue with a given delay + * ("read_delay_ms", default 5000ms) between the writer/updater and reader threads to make the + * written items available to readers. This means that a reader will only start reading from a row + * written by the writer / updater after 5secs has passed. The reader thread performs the reads from + * the given region replica id (default 1) to perform the reads. Async wal replication has to finish + * with the replication of the edits before read_delay_ms to the given region replica id so that + * the read and verify will not fail. + * + * The job will run for at least given runtime (default 10min) by running a concurrent + * writer and reader workload followed by a concurrent updater and reader workload for + * num_keys_per_server. + *

    + * Example usage:
    + *
    + * hbase org.apache.hadoop.hbase.IntegrationTestRegionReplicaReplication
    + * -DIntegrationTestRegionReplicaReplication.num_keys_per_server=10000
    + * -Dhbase.IntegrationTestRegionReplicaReplication.runtime=600000
    + * -DIntegrationTestRegionReplicaReplication.read_delay_ms=5000
    + * -DIntegrationTestRegionReplicaReplication.region_replication=3
    + * -DIntegrationTestRegionReplicaReplication.region_replica_id=2
    + * -DIntegrationTestRegionReplicaReplication.num_read_threads=100
    + * -DIntegrationTestRegionReplicaReplication.num_write_threads=100
    + *
    + */ +@Category(IntegrationTests.class) +public class IntegrationTestRegionReplicaReplication extends IntegrationTestIngest { + + private static final String TEST_NAME + = IntegrationTestRegionReplicaReplication.class.getSimpleName(); + + private static final String OPT_READ_DELAY_MS = "read_delay_ms"; + + private static final int DEFAULT_REGION_REPLICATION = 2; + private static final int SERVER_COUNT = 1; // number of slaves for the smallest cluster + private static final String[] DEFAULT_COLUMN_FAMILIES = new String[] {"f1", "f2", "f3"}; + + @Override + protected int getMinServerCount() { + return SERVER_COUNT; + } + + @Override + public void setConf(Configuration conf) { + conf.setIfUnset( + String.format("%s.%s", TEST_NAME, LoadTestTool.OPT_REGION_REPLICATION), + String.valueOf(DEFAULT_REGION_REPLICATION)); + + conf.setIfUnset( + String.format("%s.%s", TEST_NAME, LoadTestTool.OPT_COLUMN_FAMILIES), + StringUtils.join(",", DEFAULT_COLUMN_FAMILIES)); + + conf.setBoolean("hbase.table.sanity.checks", true); + + // enable async wal replication to region replicas for unit tests + conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true); + conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true); + + conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024L * 1024 * 4); // flush every 4 MB + conf.setInt("hbase.hstore.blockingStoreFiles", 100); + + super.setConf(conf); + } + + @Override + @Test + public void testIngest() throws Exception { + runIngestTest(JUNIT_RUN_TIME, 25000, 10, 1024, 10, 20); + } + + /** + * This extends MultiThreadedWriter to add a configurable delay to the keys written by the writer + * threads to become available to the MultiThradedReader threads. We add this delay because of + * the async nature of the wal replication to region replicas. + */ + public static class DelayingMultiThreadedWriter extends MultiThreadedWriter { + private long delayMs; + public DelayingMultiThreadedWriter(LoadTestDataGenerator dataGen, Configuration conf, + TableName tableName) throws IOException { + super(dataGen, conf, tableName); + } + @Override + protected BlockingQueue createWriteKeysQueue(Configuration conf) { + this.delayMs = conf.getLong(String.format("%s.%s", + IntegrationTestRegionReplicaReplication.class.getSimpleName(), OPT_READ_DELAY_MS), 5000); + return new ConstantDelayQueue(TimeUnit.MILLISECONDS, delayMs); + } + } + + /** + * This extends MultiThreadedWriter to add a configurable delay to the keys written by the writer + * threads to become available to the MultiThradedReader threads. We add this delay because of + * the async nature of the wal replication to region replicas. 
+ */ + public static class DelayingMultiThreadedUpdater extends MultiThreadedUpdater { + private long delayMs; + public DelayingMultiThreadedUpdater(LoadTestDataGenerator dataGen, Configuration conf, + TableName tableName, double updatePercent) throws IOException { + super(dataGen, conf, tableName, updatePercent); + } + @Override + protected BlockingQueue createWriteKeysQueue(Configuration conf) { + this.delayMs = conf.getLong(String.format("%s.%s", + IntegrationTestRegionReplicaReplication.class.getSimpleName(), OPT_READ_DELAY_MS), 5000); + return new ConstantDelayQueue(TimeUnit.MILLISECONDS, delayMs); + } + } + + @Override + protected void runIngestTest(long defaultRunTime, long keysPerServerPerIter, int colsPerKey, + int recordSize, int writeThreads, int readThreads) throws Exception { + + LOG.info("Running ingest"); + LOG.info("Cluster size:" + util.getHBaseClusterInterface().getClusterStatus().getServersSize()); + + // sleep for some time so that the cache for disabled tables does not interfere. + Threads.sleep( + getConf().getInt("hbase.region.replica.replication.cache.disabledAndDroppedTables.expiryMs", + 5000) + 1000); + + long start = System.currentTimeMillis(); + String runtimeKey = String.format(RUN_TIME_KEY, this.getClass().getSimpleName()); + long runtime = util.getConfiguration().getLong(runtimeKey, defaultRunTime); + long startKey = 0; + + long numKeys = getNumKeys(keysPerServerPerIter); + while (System.currentTimeMillis() - start < 0.9 * runtime) { + LOG.info("Intended run time: " + (runtime/60000) + " min, left:" + + ((runtime - (System.currentTimeMillis() - start))/60000) + " min"); + + int verifyPercent = 100; + int updatePercent = 20; + int ret = -1; + int regionReplicaId = conf.getInt(String.format("%s.%s" + , TEST_NAME, LoadTestTool.OPT_REGION_REPLICA_ID), 1); + + // we will run writers and readers at the same time. 
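The two Delaying* helpers above swap the writer/updater key queue for a ConstantDelayQueue, so a freshly written key only becomes visible to the reader threads once read_delay_ms has elapsed, which gives the async wal replication to the secondary replicas time to catch up. A minimal stand-alone sketch of that idea using java.util.concurrent.DelayQueue; the DelayedKey helper and the 200 ms delay are invented for illustration and are not part of this patch.

import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

// Sketch only: a fixed per-element delay hides newly written keys from readers
// until the delay expires, mirroring what ConstantDelayQueue does for the
// Delaying* writer/updater classes above.
public class DelayedKeyExample {
  static final class DelayedKey implements Delayed {
    final long key;
    final long readyAtNanos;
    DelayedKey(long key, long delayMs) {
      this.key = key;
      this.readyAtNanos = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(delayMs);
    }
    @Override
    public long getDelay(TimeUnit unit) {
      return unit.convert(readyAtNanos - System.nanoTime(), TimeUnit.NANOSECONDS);
    }
    @Override
    public int compareTo(Delayed other) {
      return Long.compare(getDelay(TimeUnit.NANOSECONDS), other.getDelay(TimeUnit.NANOSECONDS));
    }
  }

  public static void main(String[] args) throws InterruptedException {
    DelayQueue<DelayedKey> queue = new DelayQueue<>();
    queue.put(new DelayedKey(42L, 200));  // "writer" side: key 42 is written now
    System.out.println(queue.poll());     // null: the key is not readable yet
    DelayedKey k = queue.take();          // blocks ~200 ms, then the key is readable
    System.out.println("read key " + k.key);
  }
}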
+ List args = Lists.newArrayList(getArgsForLoadTestTool("", "", startKey, numKeys)); + args.add("-write"); + args.add(String.format("%d:%d:%d", colsPerKey, recordSize, writeThreads)); + args.add("-" + LoadTestTool.OPT_MULTIPUT); + args.add("-writer"); + args.add(DelayingMultiThreadedWriter.class.getName()); // inject writer class + args.add("-read"); + args.add(String.format("%d:%d", verifyPercent, readThreads)); + args.add("-" + LoadTestTool.OPT_REGION_REPLICA_ID); + args.add(String.valueOf(regionReplicaId)); + + ret = loadTool.run(args.toArray(new String[args.size()])); + if (0 != ret) { + String errorMsg = "Load failed with error code " + ret; + LOG.error(errorMsg); + Assert.fail(errorMsg); + } + + args = Lists.newArrayList(getArgsForLoadTestTool("", "", startKey, numKeys)); + args.add("-update"); + args.add(String.format("%s:%s:1", updatePercent, writeThreads)); + args.add("-updater"); + args.add(DelayingMultiThreadedUpdater.class.getName()); // inject updater class + args.add("-read"); + args.add(String.format("%d:%d", verifyPercent, readThreads)); + args.add("-" + LoadTestTool.OPT_REGION_REPLICA_ID); + args.add(String.valueOf(regionReplicaId)); + + ret = loadTool.run(args.toArray(new String[args.size()])); + if (0 != ret) { + String errorMsg = "Load failed with error code " + ret; + LOG.error(errorMsg); + Assert.fail(errorMsg); + } + startKey += numKeys; + } + } + + public static void main(String[] args) throws Exception { + Configuration conf = HBaseConfiguration.create(); + IntegrationTestingUtility.setUseDistributedCluster(conf); + int ret = ToolRunner.run(conf, new IntegrationTestRegionReplicaReplication(), args); + System.exit(ret); + } +} diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/RESTApiClusterManager.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/RESTApiClusterManager.java new file mode 100644 index 00000000000..28fac4ee2bf --- /dev/null +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/RESTApiClusterManager.java @@ -0,0 +1,350 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase; + +import com.sun.jersey.api.client.Client; +import com.sun.jersey.api.client.ClientResponse; +import com.sun.jersey.api.client.filter.HTTPBasicAuthFilter; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.util.ReflectionUtils; +import org.codehaus.jackson.JsonNode; +import org.codehaus.jackson.map.ObjectMapper; + +import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.UriBuilder; +import javax.xml.ws.http.HTTPException; +import java.io.IOException; +import java.net.URI; +import java.util.HashMap; +import java.util.Map; + +/** + * A ClusterManager implementation designed to control Cloudera Manager (http://www.cloudera.com) + * clusters via REST API. This API uses HTTP GET requests against the cluster manager server to + * retrieve information and POST/PUT requests to perform actions. As a simple example, to retrieve a + * list of hosts from a CM server with login credentials admin:admin, a simple curl command would be + * curl -X POST -H "Content-Type:application/json" -u admin:admin \ + * "http://this.is.my.server.com:7180/api/v8/hosts" + * + * This command would return a JSON result, which would need to be parsed to retrieve relevant + * information. This action and many others are covered by this class. + * + * A note on nomenclature: while the ClusterManager interface uses a ServiceType enum when + * referring to things like RegionServers and DataNodes, cluster managers often use different + * terminology. As an example, Cloudera Manager (http://www.cloudera.com) would refer to a + * RegionServer as a "role" of the HBase "service." It would further refer to "hbase" as the + * "serviceType." Apache Ambari (http://ambari.apache.org) would call the RegionServer a + * "component" of the HBase "service." + * + * This class will defer to the ClusterManager terminology in methods that it implements from + * that interface, but uses Cloudera Manager's terminology when dealing with its API directly. + */ +public class RESTApiClusterManager extends Configured implements ClusterManager { + // Properties that need to be in the Configuration object to interact with the REST API cluster + // manager. Most easily defined in hbase-site.xml, but can also be passed on the command line. + private static final String REST_API_CLUSTER_MANAGER_HOSTNAME = + "hbase.it.clustermanager.restapi.hostname"; + private static final String REST_API_CLUSTER_MANAGER_USERNAME = + "hbase.it.clustermanager.restapi.username"; + private static final String REST_API_CLUSTER_MANAGER_PASSWORD = + "hbase.it.clustermanager.restapi.password"; + private static final String REST_API_CLUSTER_MANAGER_CLUSTER_NAME = + "hbase.it.clustermanager.restapi.clustername"; + + // Some default values for the above properties. + private static final String DEFAULT_SERVER_HOSTNAME = "http://localhost:7180"; + private static final String DEFAULT_SERVER_USERNAME = "admin"; + private static final String DEFAULT_SERVER_PASSWORD = "admin"; + private static final String DEFAULT_CLUSTER_NAME = "Cluster 1"; + + // Fields for the hostname, username, password, and cluster name of the cluster management server + // to be used. 
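For context on how the hbase.it.clustermanager.restapi.* properties and their defaults above are consumed: setConf() reads them from the Configuration, so a run against a real Cloudera Manager instance would set them in hbase-site.xml or programmatically before the manager is built. Below is a hedged wiring sketch, not part of the patch; the hostname and credentials are placeholders, and instantiation through Hadoop's ReflectionUtils mirrors how this class builds its own HBaseClusterManager delegate.

package org.apache.hadoop.hbase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

// Illustrative wiring only.
public class RestApiClusterManagerWiring {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.it.clustermanager.restapi.hostname", "http://cm.example.com:7180");
    conf.set("hbase.it.clustermanager.restapi.username", "admin");
    conf.set("hbase.it.clustermanager.restapi.password", "admin");
    conf.set("hbase.it.clustermanager.restapi.clustername", "Cluster 1");

    // ReflectionUtils copes with the package-private constructor and, because the
    // class extends Configured, also injects the Configuration via setConf().
    RESTApiClusterManager manager =
        ReflectionUtils.newInstance(RESTApiClusterManager.class, conf);
    // e.g. manager.restart(ClusterManager.ServiceType.HBASE_REGIONSERVER, "rs1.example.com", 16020);
  }
}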
+ private String serverHostname; + private String serverUsername; + private String serverPassword; + private String clusterName; + + // Each version of Cloudera Manager supports a particular API versions. Version 6 of this API + // provides all the features needed by this class. + private static final String API_VERSION = "v6"; + + // Client instances are expensive, so use the same one for all our REST queries. + private Client client = Client.create(); + + // An instance of HBaseClusterManager is used for methods like the kill, resume, and suspend + // because cluster managers don't tend to implement these operations. + private ClusterManager hBaseClusterManager; + + private static final Log LOG = LogFactory.getLog(RESTApiClusterManager.class); + + RESTApiClusterManager() { + hBaseClusterManager = ReflectionUtils.newInstance(HBaseClusterManager.class, + new IntegrationTestingUtility().getConfiguration()); + } + + @Override + public void setConf(Configuration conf) { + super.setConf(conf); + if (conf == null) { + // Configured gets passed null before real conf. Why? I don't know. + return; + } + serverHostname = conf.get(REST_API_CLUSTER_MANAGER_HOSTNAME, DEFAULT_SERVER_HOSTNAME); + serverUsername = conf.get(REST_API_CLUSTER_MANAGER_USERNAME, DEFAULT_SERVER_USERNAME); + serverPassword = conf.get(REST_API_CLUSTER_MANAGER_PASSWORD, DEFAULT_SERVER_PASSWORD); + clusterName = conf.get(REST_API_CLUSTER_MANAGER_CLUSTER_NAME, DEFAULT_CLUSTER_NAME); + + // Add filter to Client instance to enable server authentication. + client.addFilter(new HTTPBasicAuthFilter(serverUsername, serverPassword)); + } + + @Override + public void start(ServiceType service, String hostname, int port) throws IOException { + performClusterManagerCommand(service, hostname, RoleCommand.START); + } + + @Override + public void stop(ServiceType service, String hostname, int port) throws IOException { + performClusterManagerCommand(service, hostname, RoleCommand.STOP); + } + + @Override + public void restart(ServiceType service, String hostname, int port) throws IOException { + performClusterManagerCommand(service, hostname, RoleCommand.RESTART); + } + + @Override + public boolean isRunning(ServiceType service, String hostname, int port) throws IOException { + String serviceName = getServiceName(roleServiceType.get(service)); + String hostId = getHostId(hostname); + String roleState = getRoleState(serviceName, service.toString(), hostId); + String healthSummary = getHealthSummary(serviceName, service.toString(), hostId); + boolean isRunning = false; + + // Use Yoda condition to prevent NullPointerException. roleState will be null if the "service + // type" does not exist on the specified hostname. + if ("STARTED".equals(roleState) && "GOOD".equals(healthSummary)) { + isRunning = true; + } + + return isRunning; + } + + @Override + public void kill(ServiceType service, String hostname, int port) throws IOException { + hBaseClusterManager.kill(service, hostname, port); + } + + @Override + public void suspend(ServiceType service, String hostname, int port) throws IOException { + hBaseClusterManager.kill(service, hostname, port); + } + + @Override + public void resume(ServiceType service, String hostname, int port) throws IOException { + hBaseClusterManager.kill(service, hostname, port); + } + + + // Convenience method to execute command against role on hostname. Only graceful commands are + // supported since cluster management APIs don't tend to let you SIGKILL things. 
+ private void performClusterManagerCommand(ServiceType role, String hostname, RoleCommand command) + throws IOException { + LOG.info("Performing " + command + " command against " + role + " on " + hostname + "..."); + String serviceName = getServiceName(roleServiceType.get(role)); + String hostId = getHostId(hostname); + String roleName = getRoleName(serviceName, role.toString(), hostId); + doRoleCommand(serviceName, roleName, command); + } + + // Performing a command (e.g. starting or stopping a role) requires a POST instead of a GET. + private void doRoleCommand(String serviceName, String roleName, RoleCommand roleCommand) { + URI uri = UriBuilder.fromUri(serverHostname) + .path("api") + .path(API_VERSION) + .path("clusters") + .path(clusterName) + .path("services") + .path(serviceName) + .path("roleCommands") + .path(roleCommand.toString()) + .build(); + String body = "{ \"items\": [ \"" + roleName + "\" ] }"; + LOG.info("Executing POST against " + uri + " with body " + body + "..."); + ClientResponse response = client.resource(uri) + .type(MediaType.APPLICATION_JSON) + .post(ClientResponse.class, body); + + int statusCode = response.getStatus(); + if (statusCode != Response.Status.OK.getStatusCode()) { + throw new HTTPException(statusCode); + } + } + + // Possible healthSummary values include "GOOD" and "BAD." + private String getHealthSummary(String serviceName, String roleType, String hostId) + throws IOException { + return getRolePropertyValue(serviceName, roleType, hostId, "healthSummary"); + } + + // This API uses a hostId to execute host-specific commands; get one from a hostname. + private String getHostId(String hostname) throws IOException { + String hostId = null; + + URI uri = UriBuilder.fromUri(serverHostname) + .path("api") + .path(API_VERSION) + .path("hosts") + .build(); + JsonNode hosts = getJsonNodeFromURIGet(uri); + if (hosts != null) { + // Iterate through the list of hosts, stopping once you've reached the requested hostname. + for (JsonNode host : hosts) { + if (host.get("hostname").getTextValue().equals(hostname)) { + hostId = host.get("hostId").getTextValue(); + break; + } + } + } else { + hostId = null; + } + + return hostId; + } + + // Execute GET against URI, returning a JsonNode object to be traversed. + private JsonNode getJsonNodeFromURIGet(URI uri) throws IOException { + LOG.info("Executing GET against " + uri + "..."); + ClientResponse response = client.resource(uri) + .accept(MediaType.APPLICATION_JSON_TYPE) + .get(ClientResponse.class); + + int statusCode = response.getStatus(); + if (statusCode != Response.Status.OK.getStatusCode()) { + throw new HTTPException(statusCode); + } + // This API folds information as the value to an "items" attribute. + return new ObjectMapper().readTree(response.getEntity(String.class)).get("items"); + } + + // This API assigns a unique role name to each host's instance of a role. + private String getRoleName(String serviceName, String roleType, String hostId) + throws IOException { + return getRolePropertyValue(serviceName, roleType, hostId, "name"); + } + + // Get the value of a property from a role on a particular host. 
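The Cloudera Manager responses that getJsonNodeFromURIGet() handles wrap their payload in an "items" array, which the lookup helpers above and below iterate with Jackson's tree model. A tiny stand-alone illustration of that unwrapping follows; the JSON literal and host values are invented for the example and are not real API output.

import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;

// Sketch of the "items" unwrapping used by RESTApiClusterManager's helpers.
public class CmItemsParseExample {
  public static void main(String[] args) throws Exception {
    String body = "{ \"items\": [ { \"hostname\": \"node1.example.com\", \"hostId\": \"h-1\" } ] }";
    JsonNode items = new ObjectMapper().readTree(body).get("items");
    for (JsonNode host : items) {
      System.out.println(host.get("hostname").getTextValue()
          + " -> " + host.get("hostId").getTextValue());
    }
  }
}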
+ private String getRolePropertyValue(String serviceName, String roleType, String hostId, + String property) throws IOException { + String roleValue = null; + URI uri = UriBuilder.fromUri(serverHostname) + .path("api") + .path(API_VERSION) + .path("clusters") + .path(clusterName) + .path("services") + .path(serviceName) + .path("roles") + .build(); + JsonNode roles = getJsonNodeFromURIGet(uri); + if (roles != null) { + // Iterate through the list of roles, stopping once the requested one is found. + for (JsonNode role : roles) { + if (role.get("hostRef").get("hostId").getTextValue().equals(hostId) && + role.get("type") + .getTextValue() + .toLowerCase() + .equals(roleType.toLowerCase())) { + roleValue = role.get(property).getTextValue(); + break; + } + } + } + + return roleValue; + } + + // Possible roleState values include "STARTED" and "STOPPED." + private String getRoleState(String serviceName, String roleType, String hostId) + throws IOException { + return getRolePropertyValue(serviceName, roleType, hostId, "roleState"); + } + + // Convert a service (e.g. "HBASE," "HDFS") into a service name (e.g. "HBASE-1," "HDFS-1"). + private String getServiceName(Service service) throws IOException { + String serviceName = null; + URI uri = UriBuilder.fromUri(serverHostname) + .path("api") + .path(API_VERSION) + .path("clusters") + .path(clusterName) + .path("services") + .build(); + JsonNode services = getJsonNodeFromURIGet(uri); + if (services != null) { + // Iterate through the list of services, stopping once the requested one is found. + for (JsonNode serviceEntry : services) { + if (serviceEntry.get("type").getTextValue().equals(service.toString())) { + serviceName = serviceEntry.get("name").getTextValue(); + break; + } + } + } + + return serviceName; + } + + /* + * Some enums to guard against bad calls. + */ + + // The RoleCommand enum is used by the doRoleCommand method to guard against non-existent methods + // being invoked on a given role. + private enum RoleCommand { + START, STOP, RESTART; + + // APIs tend to take commands in lowercase, so convert them to save the trouble later. + @Override + public String toString() { + return name().toLowerCase(); + } + } + + // ClusterManager methods take a "ServiceType" object (e.g. "HBASE_MASTER," "HADOOP_NAMENODE"). + // These "service types," which cluster managers call "roles" or "components," need to be mapped + // to their corresponding service (e.g. "HBase," "HDFS") in order to be controlled. 
+ private static Map roleServiceType = new HashMap(); + static { + roleServiceType.put(ServiceType.HADOOP_NAMENODE, Service.HDFS); + roleServiceType.put(ServiceType.HADOOP_DATANODE, Service.HDFS); + roleServiceType.put(ServiceType.HADOOP_JOBTRACKER, Service.MAPREDUCE); + roleServiceType.put(ServiceType.HADOOP_TASKTRACKER, Service.MAPREDUCE); + roleServiceType.put(ServiceType.HBASE_MASTER, Service.HBASE); + roleServiceType.put(ServiceType.HBASE_REGIONSERVER, Service.HBASE); + } + + private enum Service { + HBASE, HDFS, MAPREDUCE + } +} \ No newline at end of file diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java index efb44134503..c083d9c12a4 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java @@ -54,7 +54,7 @@ public class RemoveColumnAction extends Action { HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName); HColumnDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies(); - if (columnDescriptors.length <= 1) { + if (columnDescriptors.length <= (protectedColumns == null ? 1 : protectedColumns.size())) { return; } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java index 824e87f3f14..4423650b812 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java @@ -36,13 +36,13 @@ import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.IntegrationTestingUtility; -import org.apache.hadoop.hbase.testclassification.IntegrationTests; import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.NamespaceExistException; import org.apache.hadoop.hbase.NamespaceNotFoundException; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.testclassification.IntegrationTests; import org.apache.hadoop.hbase.chaos.actions.Action; import org.apache.hadoop.hbase.chaos.actions.MoveRegionsOfTableAction; import org.apache.hadoop.hbase.chaos.actions.RestartActiveMasterAction; @@ -50,8 +50,6 @@ import org.apache.hadoop.hbase.chaos.actions.RestartRsHoldingMetaAction; import org.apache.hadoop.hbase.chaos.actions.RestartRsHoldingTableAction; import org.apache.hadoop.hbase.chaos.factories.MonkeyConstants; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -107,6 +105,7 @@ import com.google.common.base.Objects; * The ChaosMonkey actions currently run are: *
 * <ul>
 * <li>Restart the RegionServer holding meta.</li>
+ * <li>Move the Regions of meta.</li>
 * <li>Restart the RegionServer holding the table the scan and put threads are targeting.</li>
 * <li>Move the Regions of the table used by the scan and put threads.</li>
 * <li>Restart the master.</li>
    • @@ -147,6 +146,7 @@ public class IntegrationTestMTTR { */ private static Action restartRSAction; private static Action restartMetaAction; + private static Action moveMetaRegionsAction; private static Action moveRegionAction; private static Action restartMasterAction; @@ -195,6 +195,10 @@ public class IntegrationTestMTTR { // Set up the action that will kill the region holding meta. restartMetaAction = new RestartRsHoldingMetaAction(sleepTime); + // Set up the action that will move the regions of meta. + moveMetaRegionsAction = new MoveRegionsOfTableAction(sleepTime, + MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, TableName.META_TABLE_NAME); + // Set up the action that will move the regions of our table. moveRegionAction = new MoveRegionsOfTableAction(sleepTime, MonkeyConstants.DEFAULT_MOVE_REGIONS_MAX_TIME, tableName); @@ -206,6 +210,7 @@ public class IntegrationTestMTTR { Action.ActionContext actionContext = new Action.ActionContext(util); restartRSAction.init(actionContext); restartMetaAction.init(actionContext); + moveMetaRegionsAction.init(actionContext); moveRegionAction.init(actionContext); restartMasterAction.init(actionContext); } @@ -255,6 +260,7 @@ public class IntegrationTestMTTR { // Clean up the actions. moveRegionAction = null; restartMetaAction = null; + moveMetaRegionsAction = null; restartRSAction = null; restartMasterAction = null; @@ -271,6 +277,11 @@ public class IntegrationTestMTTR { run(new ActionCallable(restartMetaAction), "KillRsHoldingMeta"); } + @Test + public void testMoveMeta() throws Exception { + run(new ActionCallable(moveMetaRegionsAction), "MoveMeta"); + } + @Test public void testMoveRegion() throws Exception { run(new ActionCallable(moveRegionAction), "MoveRegion"); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java index 9864031c871..00bc85179ff 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java @@ -20,9 +20,10 @@ package org.apache.hadoop.hbase.test; import java.io.DataInput; import java.io.DataOutput; -import java.io.IOException; import java.io.FileNotFoundException; +import java.io.IOException; import java.io.InterruptedIOException; +import java.security.SecureRandom; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; @@ -32,6 +33,7 @@ import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; import java.util.UUID; +import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.cli.CommandLine; @@ -64,7 +66,6 @@ import org.apache.hadoop.hbase.client.BufferedMutatorParams; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; @@ -79,6 +80,8 @@ import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; import org.apache.hadoop.hbase.mapreduce.TableMapper; import org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl; import org.apache.hadoop.hbase.mapreduce.WALPlayer; +import org.apache.hadoop.hbase.regionserver.FlushLargeStoresPolicy; +import 
org.apache.hadoop.hbase.regionserver.FlushPolicyFactory; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.testclassification.IntegrationTests; import org.apache.hadoop.hbase.util.AbstractHBaseTool; @@ -100,16 +103,16 @@ import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.apache.hadoop.mapreduce.TaskAttemptID; +import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.input.FileSplit; import org.apache.hadoop.mapreduce.lib.input.SequenceFileAsBinaryInputFormat; -import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat; -import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; import org.apache.hadoop.mapreduce.lib.output.SequenceFileAsBinaryOutputFormat; import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat; import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat; +import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.junit.Test; @@ -189,6 +192,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { protected static String DEFAULT_TABLE_NAME = "IntegrationTestBigLinkedList"; protected static byte[] FAMILY_NAME = Bytes.toBytes("meta"); + private static byte[] BIG_FAMILY_NAME = Bytes.toBytes("big"); + private static byte[] TINY_FAMILY_NAME = Bytes.toBytes("tiny"); //link to the id of the prev node in the linked list protected static final byte[] COLUMN_PREV = Bytes.toBytes("prev"); @@ -237,6 +242,20 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { private static final Log LOG = LogFactory.getLog(Generator.class); + /** + * Set this configuration if you want to test single-column family flush works. If set, we will + * add a big column family and a small column family on either side of the usual ITBLL 'meta' + * column family. When we write out the ITBLL, we will also add to the big column family a value + * bigger than that for ITBLL and for small, something way smaller. The idea is that when + * flush-by-column family rather than by region is enabled, we can see if ITBLL is broke in any + * way. Here is how you would pass it: + *

      + * $ ./bin/hbase org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList + * -Dgenerator.multiple.columnfamilies=true generator 1 10 g + */ + public static final String MULTIPLE_UNEVEN_COLUMNFAMILIES_KEY = + "generator.multiple.columnfamilies"; + static class GeneratorInputFormat extends InputFormat { static class GeneratorInputSplit extends InputSplit implements Writable { @Override @@ -285,7 +304,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { public void initialize(InputSplit arg0, TaskAttemptContext context) throws IOException, InterruptedException { numNodes = context.getConfiguration().getLong(GENERATOR_NUM_ROWS_PER_MAP_KEY, 25000000); - rand = new Random(); + // Use SecureRandom to avoid issue described in HBASE-13382. + rand = new SecureRandom(); } @Override @@ -327,8 +347,9 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { /** * Some ASCII art time: + *

      * [ . . . ] represents one batch of random longs of length WIDTH - * + *

            *                _________________________
            *               |                  ______ |
            *               |                 |      ||
      @@ -348,6 +369,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
            *             ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^_____|||
            *             |                 |________||
            *             |___________________________|
      +     * 
      */ static class GeneratorMapper extends Mapper { @@ -363,6 +385,9 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { long numNodes; long wrap; int width; + boolean multipleUnevenColumnFamilies; + byte[] tinyValue = new byte[] { 't' }; + byte[] bigValue = null; @Override protected void setup(Context context) throws IOException, InterruptedException { @@ -378,6 +403,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { if (this.numNodes < this.wrap) { this.wrap = this.numNodes; } + this.multipleUnevenColumnFamilies = + context.getConfiguration().getBoolean(MULTIPLE_UNEVEN_COLUMNFAMILIES_KEY, false); } protected void instantiateHTable() throws IOException { @@ -403,8 +430,9 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { persist(output, count, prev, current, id); i = 0; - if (first == null) + if (first == null) { first = current; + } prev = current; current = new byte[this.width][]; @@ -434,13 +462,25 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { throws IOException { for (int i = 0; i < current.length; i++) { Put put = new Put(current[i]); - put.add(FAMILY_NAME, COLUMN_PREV, prev == null ? NO_KEY : prev[i]); + put.addColumn(FAMILY_NAME, COLUMN_PREV, prev == null ? NO_KEY : prev[i]); if (count >= 0) { - put.add(FAMILY_NAME, COLUMN_COUNT, Bytes.toBytes(count + i)); + put.addColumn(FAMILY_NAME, COLUMN_COUNT, Bytes.toBytes(count + i)); } if (id != null) { - put.add(FAMILY_NAME, COLUMN_CLIENT, id); + put.addColumn(FAMILY_NAME, COLUMN_CLIENT, id); + } + // See if we are to write multiple columns. + if (this.multipleUnevenColumnFamilies) { + // Use any column name. + put.addColumn(TINY_FAMILY_NAME, TINY_FAMILY_NAME, this.tinyValue); + // If we've not allocated bigValue, do it now. Reuse same value each time. + if (this.bigValue == null) { + this.bigValue = new byte[current[i].length * 10]; + ThreadLocalRandom.current().nextBytes(this.bigValue); + } + // Use any column name. + put.addColumn(BIG_FAMILY_NAME, BIG_FAMILY_NAME, this.bigValue); } mutator.mutate(put); @@ -474,12 +514,15 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { protected void createSchema() throws IOException { Configuration conf = getConf(); - Admin admin = new HBaseAdmin(conf); TableName tableName = getTableName(conf); - try { + try (Connection conn = ConnectionFactory.createConnection(conf); + Admin admin = conn.getAdmin()) { if (!admin.tableExists(tableName)) { HTableDescriptor htd = new HTableDescriptor(getTableName(getConf())); htd.addFamily(new HColumnDescriptor(FAMILY_NAME)); + // Always add these families. Just skip writing to them when we do not test per CF flush. 
+ htd.addFamily(new HColumnDescriptor(BIG_FAMILY_NAME)); + htd.addFamily(new HColumnDescriptor(TINY_FAMILY_NAME)); int numberOfServers = admin.getClusterStatus().getServers().size(); if (numberOfServers == 0) { throw new IllegalStateException("No live regionservers"); @@ -498,8 +541,6 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { } catch (MasterNotRunningException e) { LOG.error("Master not running", e); throw new IOException(e); - } finally { - admin.close(); } } @@ -507,7 +548,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { Integer width, Integer wrapMuplitplier) throws Exception { LOG.info("Running RandomInputGenerator with numMappers=" + numMappers + ", numNodes=" + numNodes); - Job job = new Job(getConf()); + Job job = Job.getInstance(getConf()); job.setJobName("Random Input Generator"); job.setNumReduceTasks(0); @@ -533,7 +574,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { Integer width, Integer wrapMuplitplier) throws Exception { LOG.info("Running Generator with numMappers=" + numMappers +", numNodes=" + numNodes); createSchema(); - Job job = new Job(getConf()); + Job job = Job.getInstance(getConf()); job.setJobName("Link Generator"); job.setNumReduceTasks(0); @@ -551,6 +592,11 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { job.setOutputFormatClass(NullOutputFormat.class); job.getConfiguration().setBoolean("mapreduce.map.speculative", false); + String multipleUnevenColumnFamiliesStr = System.getProperty(MULTIPLE_UNEVEN_COLUMNFAMILIES_KEY); + if (multipleUnevenColumnFamiliesStr != null) { + job.getConfiguration().setBoolean(MULTIPLE_UNEVEN_COLUMNFAMILIES_KEY, + Boolean.parseBoolean(multipleUnevenColumnFamiliesStr)); + } TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.addDependencyJars(job.getConfiguration(), AbstractHBaseTool.class); TableMapReduceUtil.initCredentials(job); @@ -704,9 +750,9 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { return result; } - private static SortedSet readFileToSearch(final Configuration conf, - final FileSystem fs, final LocatedFileStatus keyFileStatus) - throws IOException, InterruptedException { + private static SortedSet readFileToSearch(final Configuration conf, + final FileSystem fs, final LocatedFileStatus keyFileStatus) throws IOException, + InterruptedException { SortedSet result = new TreeSet(Bytes.BYTES_COMPARATOR); // Return entries that are flagged Counts.UNDEFINED in the value. Return the row. This is // what is missing. 
@@ -719,13 +765,11 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { while (rr.nextKeyValue()) { rr.getCurrentKey(); BytesWritable bw = rr.getCurrentValue(); - switch (Verify.VerifyReducer.whichType(bw.getBytes())) { - case UNDEFINED: - byte [] key = new byte [rr.getCurrentKey().getLength()]; - System.arraycopy(rr.getCurrentKey().getBytes(), 0, key, 0, - rr.getCurrentKey().getLength()); + if (Verify.VerifyReducer.whichType(bw.getBytes()) == Verify.Counts.UNDEFINED) { + byte[] key = new byte[rr.getCurrentKey().getLength()]; + System.arraycopy(rr.getCurrentKey().getBytes(), 0, key, 0, rr.getCurrentKey() + .getLength()); result.add(key); - break; } } } @@ -740,7 +784,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { static class Verify extends Configured implements Tool { private static final Log LOG = LogFactory.getLog(Verify.class); - protected static final BytesWritable DEF = new BytesWritable(NO_KEY); + protected static final BytesWritable DEF = new BytesWritable(new byte[] { 0 }); + protected static final BytesWritable DEF_LOST_FAMILIES = new BytesWritable(new byte[] { 1 }); protected Job job; @@ -748,12 +793,29 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { private BytesWritable row = new BytesWritable(); private BytesWritable ref = new BytesWritable(); + private boolean multipleUnevenColumnFamilies; + + @Override + protected void setup( + Mapper.Context context) + throws IOException, InterruptedException { + this.multipleUnevenColumnFamilies = + context.getConfiguration().getBoolean(Generator.MULTIPLE_UNEVEN_COLUMNFAMILIES_KEY, + false); + } + @Override protected void map(ImmutableBytesWritable key, Result value, Context context) throws IOException ,InterruptedException { byte[] rowKey = key.get(); row.set(rowKey, 0, rowKey.length); - context.write(row, DEF); + if (multipleUnevenColumnFamilies + && (!value.containsColumn(BIG_FAMILY_NAME, BIG_FAMILY_NAME) || !value.containsColumn( + TINY_FAMILY_NAME, TINY_FAMILY_NAME))) { + context.write(row, DEF_LOST_FAMILIES); + } else { + context.write(row, DEF); + } byte[] prev = value.getValue(FAMILY_NAME, COLUMN_PREV); if (prev != null && prev.length > 0) { ref.set(prev, 0, prev.length); @@ -769,7 +831,8 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { * problems found from the reducer. */ public static enum Counts { - UNREFERENCED, UNDEFINED, REFERENCED, CORRUPT, EXTRAREFERENCES, EXTRA_UNDEF_REFERENCES + UNREFERENCED, UNDEFINED, REFERENCED, CORRUPT, EXTRAREFERENCES, EXTRA_UNDEF_REFERENCES, + LOST_FAMILIES } /** @@ -777,11 +840,13 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { * subsequent investigative mapreduce jobs. Each emitted value is prefaced by a one byte flag * saying what sort of emission it is. Flag is the Count enum ordinal as a short. 
*/ - public static class VerifyReducer - extends Reducer { + public static class VerifyReducer extends + Reducer { private ArrayList refs = new ArrayList(); - private final BytesWritable UNREF = - new BytesWritable(addPrefixFlag(Counts.UNREFERENCED.ordinal(), new byte [] {})); + private final BytesWritable UNREF = new BytesWritable(addPrefixFlag( + Counts.UNREFERENCED.ordinal(), new byte[] {})); + private final BytesWritable LOSTFAM = new BytesWritable(addPrefixFlag( + Counts.LOST_FAMILIES.ordinal(), new byte[] {})); private AtomicInteger rows = new AtomicInteger(0); private Connection connection; @@ -794,9 +859,12 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { } @Override - protected void cleanup(Reducer.Context context) - throws IOException, InterruptedException { - if (this.connection != null) this.connection.close(); + protected void cleanup( + Reducer.Context context) + throws IOException, InterruptedException { + if (this.connection != null) { + this.connection.close(); + } super.cleanup(context); } @@ -806,12 +874,12 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { * @return Return new byte array that has ordinal as prefix on front taking up * Bytes.SIZEOF_SHORT bytes followed by r */ - public static byte [] addPrefixFlag(final int ordinal, final byte [] r) { - byte [] prefix = Bytes.toBytes((short)ordinal); + public static byte[] addPrefixFlag(final int ordinal, final byte [] r) { + byte[] prefix = Bytes.toBytes((short)ordinal); if (prefix.length != Bytes.SIZEOF_SHORT) { throw new RuntimeException("Unexpected size: " + prefix.length); } - byte [] result = new byte [prefix.length + r.length]; + byte[] result = new byte[prefix.length + r.length]; System.arraycopy(prefix, 0, result, 0, prefix.length); System.arraycopy(r, 0, result, prefix.length, r.length); return result; @@ -831,21 +899,24 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { * @param bw * @return Row bytes minus the type flag. */ - public static byte [] getRowOnly(BytesWritable bw) { - byte [] bytes = new byte [bw.getLength() - Bytes.SIZEOF_SHORT]; + public static byte[] getRowOnly(BytesWritable bw) { + byte[] bytes = new byte [bw.getLength() - Bytes.SIZEOF_SHORT]; System.arraycopy(bw.getBytes(), Bytes.SIZEOF_SHORT, bytes, 0, bytes.length); return bytes; } @Override public void reduce(BytesWritable key, Iterable values, Context context) - throws IOException, InterruptedException { - + throws IOException, InterruptedException { int defCount = 0; + boolean lostFamilies = false; refs.clear(); for (BytesWritable type : values) { if (type.getLength() == DEF.getLength()) { defCount++; + if (type.getBytes()[0] == 1) { + lostFamilies = true; + } } else { byte[] bytes = new byte[type.getLength()]; System.arraycopy(type.getBytes(), 0, bytes, 0, type.getLength()); @@ -861,13 +932,18 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { LOG.error("LinkedListError: key=" + keyString + ", reference(s)=" + (refsSb != null? refsSb.toString(): "")); } + if (lostFamilies) { + LOG.error("LinkedListError: key=" + keyString + ", lost big or tiny families"); + context.getCounter(Counts.LOST_FAMILIES).increment(1); + context.write(key, LOSTFAM); + } if (defCount == 0 && refs.size() > 0) { // This is bad, found a node that is referenced but not defined. It must have been // lost, emit some info about this node for debugging purposes. // Write out a line per reference. 
If more than one, flag it.; for (int i = 0; i < refs.size(); i++) { - byte [] bs = refs.get(i); + byte[] bs = refs.get(i); int ordinal; if (i <= 0) { ordinal = Counts.UNDEFINED.ordinal(); @@ -963,16 +1039,16 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { @Override public int run(String[] args) throws Exception { - if (args.length != 2) { - System.out.println("Usage : " + Verify.class.getSimpleName() + " "); + System.out.println("Usage : " + Verify.class.getSimpleName() + + " "); return 0; } String outputDir = args[0]; int numReducers = Integer.parseInt(args[1]); - return run(outputDir, numReducers); + return run(outputDir, numReducers); } public int run(String outputDir, int numReducers) throws Exception { @@ -982,7 +1058,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { public int run(Path outputDir, int numReducers) throws Exception { LOG.info("Running Verify with outputDir=" + outputDir +", numReducers=" + numReducers); - job = new Job(getConf()); + job = Job.getInstance(getConf()); job.setJobName("Link Verifier"); job.setNumReduceTasks(numReducers); @@ -994,6 +1070,11 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { scan.addColumn(FAMILY_NAME, COLUMN_PREV); scan.setCaching(10000); scan.setCacheBlocks(false); + if (isMultiUnevenColumnFamilies()) { + scan.addColumn(BIG_FAMILY_NAME, BIG_FAMILY_NAME); + scan.addColumn(TINY_FAMILY_NAME, TINY_FAMILY_NAME); + job.getConfiguration().setBoolean(Generator.MULTIPLE_UNEVEN_COLUMNFAMILIES_KEY, true); + } TableMapReduceUtil.initTableMapperJob(getTableName(getConf()).getName(), scan, VerifyMapper.class, BytesWritable.class, BytesWritable.class, job); @@ -1012,7 +1093,6 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { return success ? 0 : 1; } - @SuppressWarnings("deprecation") public boolean verify(long expectedReferenced) throws Exception { if (job == null) { throw new IllegalStateException("You should call run() first"); @@ -1024,6 +1104,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { Counter unreferenced = counters.findCounter(Counts.UNREFERENCED); Counter undefined = counters.findCounter(Counts.UNDEFINED); Counter multiref = counters.findCounter(Counts.EXTRAREFERENCES); + Counter lostfamilies = counters.findCounter(Counts.LOST_FAMILIES); boolean success = true; //assert @@ -1045,6 +1126,11 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { success = false; } + if (lostfamilies.getValue() > 0) { + LOG.error("Found nodes which lost big or tiny families, count=" + lostfamilies.getValue()); + success = false; + } + if (!success) { handleFailure(counters); } @@ -1274,7 +1360,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { if (cmd.hasOption('n')) { maxQueries = Long.parseLong(cmd.getOptionValue("n")); } - Random rand = new Random(); + Random rand = new SecureRandom(); boolean isSpecificStart = cmd.hasOption('s'); byte[] startKey = isSpecificStart ? Bytes.toBytesBinary(cmd.getOptionValue('s')) : null; int logEvery = cmd.hasOption('l') ? 
Integer.parseInt(cmd.getOptionValue('l')) : 1; @@ -1358,17 +1444,17 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { Path p = new Path(args[0]); Configuration conf = getConf(); TableName tableName = getTableName(conf); + try (FileSystem fs = HFileSystem.get(conf); + Connection conn = ConnectionFactory.createConnection(conf); + Admin admin = conn.getAdmin()) { + if (admin.tableExists(tableName)) { + admin.disableTable(tableName); + admin.deleteTable(tableName); + } - FileSystem fs = HFileSystem.get(conf); - Admin admin = new HBaseAdmin(conf); - - if (admin.tableExists(tableName)) { - admin.disableTable(tableName); - admin.deleteTable(tableName); - } - - if (fs.exists(p)) { - fs.delete(p, true); + if (fs.exists(p)) { + fs.delete(p, true); + } } return 0; @@ -1420,12 +1506,22 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { } } + private static boolean isMultiUnevenColumnFamilies() { + return Boolean.TRUE.toString().equalsIgnoreCase( + System.getProperty(Generator.MULTIPLE_UNEVEN_COLUMNFAMILIES_KEY)); + } + @Test public void testContinuousIngest() throws IOException, Exception { //Loop - int ret = ToolRunner.run(getTestingUtil(getConf()).getConfiguration(), new Loop(), - new String[] {"1", "1", "2000000", - util.getDataTestDirOnTestFS("IntegrationTestBigLinkedList").toString(), "1"}); + Configuration conf = getTestingUtil(getConf()).getConfiguration(); + if (isMultiUnevenColumnFamilies()) { + // make sure per CF flush is on + conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushLargeStoresPolicy.class.getName()); + } + int ret = + ToolRunner.run(conf, new Loop(), new String[] { "1", "1", "2000000", + util.getDataTestDirOnTestFS("IntegrationTestBigLinkedList").toString(), "1" }); org.junit.Assert.assertEquals(0, ret); } @@ -1468,7 +1564,6 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { @Override public int runTestFromCommandLine() throws Exception { - Tool tool = null; if (toRun.equalsIgnoreCase("Generator")) { tool = new Generator(); @@ -1504,7 +1599,12 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase { @Override protected Set getColumnFamilies() { - return Sets.newHashSet(Bytes.toString(FAMILY_NAME)); + if (isMultiUnevenColumnFamilies()) { + return Sets.newHashSet(Bytes.toString(FAMILY_NAME), Bytes.toString(BIG_FAMILY_NAME), + Bytes.toString(TINY_FAMILY_NAME)); + } else { + return Sets.newHashSet(Bytes.toString(FAMILY_NAME)); + } } private static void setJobConf(Job job, int numMappers, long numNodes, diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java index 99ad36aa513..b82c750c76d 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedListWithVisibility.java @@ -40,10 +40,10 @@ import org.apache.hadoop.hbase.chaos.factories.MonkeyFactory; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.BufferedMutatorParams; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; import 
org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; @@ -128,7 +128,8 @@ public class IntegrationTestBigLinkedListWithVisibility extends IntegrationTestB protected void createSchema() throws IOException { LOG.info("Creating tables"); // Create three tables - boolean acl = AccessControlClient.isAccessControllerRunning(getConf()); + boolean acl = AccessControlClient.isAccessControllerRunning(ConnectionFactory + .createConnection(getConf())); if(!acl) { LOG.info("No ACL available."); } @@ -156,8 +157,8 @@ public class IntegrationTestBigLinkedListWithVisibility extends IntegrationTestB LOG.info("Granting permissions for user " + USER.getShortName()); Permission.Action[] actions = { Permission.Action.READ }; try { - AccessControlClient.grant(getConf(), tableName, USER.getShortName(), null, null, - actions); + AccessControlClient.grant(ConnectionFactory.createConnection(getConf()), tableName, + USER.getShortName(), null, null, actions); } catch (Throwable e) { LOG.fatal("Error in granting permission for the user " + USER.getShortName(), e); throw new IOException(e); @@ -448,7 +449,7 @@ public class IntegrationTestBigLinkedListWithVisibility extends IntegrationTestB @Override protected void handleFailure(Counters counters) throws IOException { Configuration conf = job.getConfiguration(); - HConnection conn = HConnectionManager.getConnection(conf); + HConnection conn = (HConnection) ConnectionFactory.createConnection(conf); TableName tableName = TableName.valueOf(COMMON_TABLE_NAME); CounterGroup g = counters.getGroup("undef"); Iterator it = g.iterator(); diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java new file mode 100644 index 00000000000..a56d5fee51a --- /dev/null +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java @@ -0,0 +1,417 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.test; + +import com.google.common.base.Joiner; +import org.apache.commons.cli.CommandLine; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.IntegrationTestingUtility; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Set; +import java.util.TreeSet; +import java.util.UUID; + + +/** + * This is an integration test for replication. It is derived off + * {@link org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList} that creates a large circular + * linked list in one cluster and verifies that the data is correct in a sink cluster. The test + * handles creating the tables and schema and setting up the replication. + */ +public class IntegrationTestReplication extends IntegrationTestBigLinkedList { + protected String sourceClusterIdString; + protected String sinkClusterIdString; + protected int numIterations; + protected int numMappers; + protected long numNodes; + protected String outputDir; + protected int numReducers; + protected int generateVerifyGap; + protected Integer width; + protected Integer wrapMultiplier; + + private final String SOURCE_CLUSTER_OPT = "sourceCluster"; + private final String DEST_CLUSTER_OPT = "destCluster"; + private final String ITERATIONS_OPT = "iterations"; + private final String NUM_MAPPERS_OPT = "numMappers"; + private final String OUTPUT_DIR_OPT = "outputDir"; + private final String NUM_REDUCERS_OPT = "numReducers"; + + /** + * The gap (in seconds) from when data is finished being generated at the source + * to when it can be verified. This is the replication lag we are willing to tolerate + */ + private final String GENERATE_VERIFY_GAP_OPT = "generateVerifyGap"; + + /** + * The width of the linked list. + * See {@link org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList} for more details + */ + private final String WIDTH_OPT = "width"; + + /** + * The number of rows after which the linked list points to the first row. + * See {@link org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList} for more details + */ + private final String WRAP_MULTIPLIER_OPT = "wrapMultiplier"; + + /** + * The number of nodes in the test setup. This has to be a multiple of WRAP_MULTIPLIER * WIDTH + * in order to ensure that the linked list can is complete. 
+ * See {@link org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList} for more details + */ + private final String NUM_NODES_OPT = "numNodes"; + + private final int DEFAULT_NUM_MAPPERS = 1; + private final int DEFAULT_NUM_REDUCERS = 1; + private final int DEFAULT_NUM_ITERATIONS = 1; + private final int DEFAULT_GENERATE_VERIFY_GAP = 60; + private final int DEFAULT_WIDTH = 1000000; + private final int DEFAULT_WRAP_MULTIPLIER = 25; + private final int DEFAULT_NUM_NODES = DEFAULT_WIDTH * DEFAULT_WRAP_MULTIPLIER; + + /** + * Wrapper around an HBase ClusterID allowing us + * to get admin connections and configurations for it + */ + protected class ClusterID { + private final Configuration configuration; + private Connection connection = null; + + /** + * This creates a new ClusterID wrapper that will automatically build connections and + * configurations to be able to talk to the specified cluster + * + * @param base the base configuration that this class will add to + * @param key the cluster key in the form of zk_quorum:zk_port:zk_parent_node + */ + public ClusterID(Configuration base, + String key) { + configuration = new Configuration(base); + String[] parts = key.split(":"); + configuration.set(HConstants.ZOOKEEPER_QUORUM, parts[0]); + configuration.set(HConstants.ZOOKEEPER_CLIENT_PORT, parts[1]); + configuration.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parts[2]); + } + + @Override + public String toString() { + return Joiner.on(":").join(configuration.get(HConstants.ZOOKEEPER_QUORUM), + configuration.get(HConstants.ZOOKEEPER_CLIENT_PORT), + configuration.get(HConstants.ZOOKEEPER_ZNODE_PARENT)); + } + + public Configuration getConfiguration() { + return this.configuration; + } + + public Connection getConnection() throws Exception { + if (this.connection == null) { + this.connection = ConnectionFactory.createConnection(this.configuration); + } + return this.connection; + } + + public void closeConnection() throws Exception { + this.connection.close(); + this.connection = null; + } + + public boolean equals(ClusterID other) { + return this.toString().equalsIgnoreCase(other.toString()); + } + } + + /** + * The main runner loop for the test. It uses + * {@link org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList} + * for the generation and verification of the linked list. It is heavily based on + * {@link org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList.Loop} + */ + protected class VerifyReplicationLoop extends Configured implements Tool { + private final Log LOG = LogFactory.getLog(VerifyReplicationLoop.class); + protected ClusterID source; + protected ClusterID sink; + + IntegrationTestBigLinkedList integrationTestBigLinkedList; + + /** + * This tears down any tables that existed from before and rebuilds the tables and schemas on + * the source cluster. It then sets up replication from the source to the sink cluster by using + * the {@link org.apache.hadoop.hbase.client.replication.ReplicationAdmin} + * connection. 
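The ClusterID wrapper above accepts a cluster key of the form zk_quorum:zk_port:zk_parent_node (for example localhost:2181:/hbase, as the -sourceCluster/-destCluster option help further down notes). A minimal sketch of the same parsing, handy for sanity-checking a key before handing it to the test; the class name and example keys here are invented for illustration and are not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

// Sketch only: mirrors what ClusterID does with a "zk_quorum:zk_port:zk_parent_node" key.
public class ClusterKeyExample {
  static Configuration forClusterKey(Configuration base, String key) {
    Configuration conf = new Configuration(base);
    String[] parts = key.split(":");
    conf.set(HConstants.ZOOKEEPER_QUORUM, parts[0]);
    conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, parts[1]);
    conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, parts[2]);
    return conf;
  }

  public static void main(String[] args) {
    Configuration sink = forClusterKey(HBaseConfiguration.create(), "localhost:2182:/hbase");
    System.out.println(sink.get(HConstants.ZOOKEEPER_QUORUM));       // localhost
    System.out.println(sink.get(HConstants.ZOOKEEPER_ZNODE_PARENT)); // /hbase
  }
}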
+ * + * @throws Exception + */ + protected void setupTablesAndReplication() throws Exception { + TableName tableName = getTableName(source.getConfiguration()); + + ClusterID[] clusters = {source, sink}; + + // delete any old tables in the source and sink + for (ClusterID cluster : clusters) { + Admin admin = cluster.getConnection().getAdmin(); + + if (admin.tableExists(tableName)) { + if (admin.isTableEnabled(tableName)) { + admin.disableTable(tableName); + } + + /** + * TODO: This is a work around on a replication bug (HBASE-13416) + * When we recreate a table against that has recently been + * deleted, the contents of the logs are replayed even though + * they should not. This ensures that we flush the logs + * before the table gets deleted. Eventually the bug should be + * fixed and this should be removed. + */ + Set regionServers = new TreeSet<>(); + for (HRegionLocation rl : + cluster.getConnection().getRegionLocator(tableName).getAllRegionLocations()) { + regionServers.add(rl.getServerName()); + } + + for (ServerName server : regionServers) { + source.getConnection().getAdmin().rollWALWriter(server); + } + + admin.deleteTable(tableName); + } + } + + // create the schema + Generator generator = new Generator(); + generator.setConf(source.getConfiguration()); + generator.createSchema(); + + // setup the replication on the source + if (!source.equals(sink)) { + ReplicationAdmin replicationAdmin = new ReplicationAdmin(source.getConfiguration()); + // remove any old replication peers + for (String oldPeer : replicationAdmin.listPeerConfigs().keySet()) { + replicationAdmin.removePeer(oldPeer); + } + + // set the sink to be the target + ReplicationPeerConfig peerConfig = new ReplicationPeerConfig(); + peerConfig.setClusterKey(sink.toString()); + + // set the test table to be the table to replicate + HashMap> toReplicate = new HashMap<>(); + toReplicate.put(tableName, new ArrayList(0)); + + replicationAdmin.addPeer("TestPeer", peerConfig, toReplicate); + + replicationAdmin.enableTableRep(tableName); + replicationAdmin.close(); + } + + for (ClusterID cluster : clusters) { + cluster.closeConnection(); + } + } + + protected void waitForReplication() throws Exception { + // TODO: we shouldn't be sleeping here. It would be better to query the region servers + // and wait for them to report 0 replication lag. + Thread.sleep(generateVerifyGap * 1000); + } + + /** + * Run the {@link org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList.Generator} in the + * source cluster. This assumes that the tables have been setup via setupTablesAndReplication. + * + * @throws Exception + */ + protected void runGenerator() throws Exception { + Path outputPath = new Path(outputDir); + UUID uuid = UUID.randomUUID(); //create a random UUID. + Path generatorOutput = new Path(outputPath, uuid.toString()); + + Generator generator = new Generator(); + generator.setConf(source.getConfiguration()); + + int retCode = generator.run(numMappers, numNodes, generatorOutput, width, wrapMultiplier); + if (retCode > 0) { + throw new RuntimeException("Generator failed with return code: " + retCode); + } + } + + + /** + * Run the {@link org.apache.hadoop.hbase.test.IntegrationTestBigLinkedList.Verify} + * in the sink cluster. 
If replication is working properly the data written at the source + * cluster should be available in the sink cluster after a reasonable gap + * + * @param expectedNumNodes the number of nodes we are expecting to see in the sink cluster + * @throws Exception + */ + protected void runVerify(long expectedNumNodes) throws Exception { + Path outputPath = new Path(outputDir); + UUID uuid = UUID.randomUUID(); //create a random UUID. + Path iterationOutput = new Path(outputPath, uuid.toString()); + + Verify verify = new Verify(); + verify.setConf(sink.getConfiguration()); + + int retCode = verify.run(iterationOutput, numReducers); + if (retCode > 0) { + throw new RuntimeException("Verify.run failed with return code: " + retCode); + } + + if (!verify.verify(expectedNumNodes)) { + throw new RuntimeException("Verify.verify failed"); + } + + LOG.info("Verify finished with success. Total nodes=" + expectedNumNodes); + } + + /** + * The main test runner + * + * This test has 4 steps: + * 1: setupTablesAndReplication + * 2: generate the data into the source cluster + * 3: wait for replication to propagate + * 4: verify that the data is available in the sink cluster + * + * @param args should be empty + * @return 0 on success + * @throws Exception on an error + */ + @Override + public int run(String[] args) throws Exception { + source = new ClusterID(getConf(), sourceClusterIdString); + sink = new ClusterID(getConf(), sinkClusterIdString); + + setupTablesAndReplication(); + int expectedNumNodes = 0; + for (int i = 0; i < numIterations; i++) { + LOG.info("Starting iteration = " + i); + + expectedNumNodes += numMappers * numNodes; + + runGenerator(); + waitForReplication(); + runVerify(expectedNumNodes); + } + + /** + * we are always returning 0 because exceptions are thrown when there is an error + * in the verification step. + */ + return 0; + } + } + + @Override + protected void addOptions() { + super.addOptions(); + addRequiredOptWithArg("s", SOURCE_CLUSTER_OPT, + "Cluster ID of the source cluster (e.g. localhost:2181:/hbase)"); + addRequiredOptWithArg("r", DEST_CLUSTER_OPT, + "Cluster ID of the sink cluster (e.g. localhost:2182:/hbase)"); + addRequiredOptWithArg("d", OUTPUT_DIR_OPT, + "Temporary directory where to write keys for the test"); + + + addOptWithArg("nm", NUM_MAPPERS_OPT, + "Number of mappers (default: " + DEFAULT_NUM_MAPPERS + ")"); + addOptWithArg("nr", NUM_REDUCERS_OPT, + "Number of reducers (default: " + DEFAULT_NUM_MAPPERS + ")"); + addOptWithArg("n", NUM_NODES_OPT, + "Number of nodes. This should be a multiple of width * wrapMultiplier." 
+ + " (default: " + DEFAULT_NUM_NODES + ")"); + addOptWithArg("i", ITERATIONS_OPT, "Number of iterations to run (default: " + + DEFAULT_NUM_ITERATIONS + ")"); + addOptWithArg("t", GENERATE_VERIFY_GAP_OPT, + "Gap between generate and verify steps in seconds (default: " + + DEFAULT_GENERATE_VERIFY_GAP + ")"); + addOptWithArg("w", WIDTH_OPT, + "Width of the linked list chain (default: " + DEFAULT_WIDTH + ")"); + addOptWithArg("wm", WRAP_MULTIPLIER_OPT, "How many times to wrap around (default: " + + DEFAULT_WRAP_MULTIPLIER + ")"); + } + + @Override + protected void processOptions(CommandLine cmd) { + processBaseOptions(cmd); + + sourceClusterIdString = cmd.getOptionValue(SOURCE_CLUSTER_OPT); + sinkClusterIdString = cmd.getOptionValue(DEST_CLUSTER_OPT); + outputDir = cmd.getOptionValue(OUTPUT_DIR_OPT); + + /** This uses parseInt from {@link org.apache.hadoop.hbase.util.AbstractHBaseTool} */ + numMappers = parseInt(cmd.getOptionValue(NUM_MAPPERS_OPT, + Integer.toString(DEFAULT_NUM_MAPPERS)), + 1, Integer.MAX_VALUE); + numReducers = parseInt(cmd.getOptionValue(NUM_REDUCERS_OPT, + Integer.toString(DEFAULT_NUM_REDUCERS)), + 1, Integer.MAX_VALUE); + numNodes = parseInt(cmd.getOptionValue(NUM_NODES_OPT, Integer.toString(DEFAULT_NUM_NODES)), + 1, Integer.MAX_VALUE); + generateVerifyGap = parseInt(cmd.getOptionValue(GENERATE_VERIFY_GAP_OPT, + Integer.toString(DEFAULT_GENERATE_VERIFY_GAP)), + 1, Integer.MAX_VALUE); + numIterations = parseInt(cmd.getOptionValue(ITERATIONS_OPT, + Integer.toString(DEFAULT_NUM_ITERATIONS)), + 1, Integer.MAX_VALUE); + width = parseInt(cmd.getOptionValue(WIDTH_OPT, Integer.toString(DEFAULT_WIDTH)), + 1, Integer.MAX_VALUE); + wrapMultiplier = parseInt(cmd.getOptionValue(WRAP_MULTIPLIER_OPT, + Integer.toString(DEFAULT_WRAP_MULTIPLIER)), + 1, Integer.MAX_VALUE); + + if (numNodes % (width * wrapMultiplier) != 0) { + throw new RuntimeException("numNodes must be a multiple of width and wrap multiplier"); + } + } + + @Override + public int runTestFromCommandLine() throws Exception { + VerifyReplicationLoop tool = new VerifyReplicationLoop(); + tool.integrationTestBigLinkedList = this; + return ToolRunner.run(getConf(), tool, null); + } + + public static void main(String[] args) throws Exception { + Configuration conf = HBaseConfiguration.create(); + IntegrationTestingUtility.setUseDistributedCluster(conf); + int ret = ToolRunner.run(conf, new IntegrationTestReplication(), args); + System.exit(ret); + } +} diff --git a/hbase-prefix-tree/pom.xml b/hbase-prefix-tree/pom.xml index a97de924640..3fc35a34350 100644 --- a/hbase-prefix-tree/pom.xml +++ b/hbase-prefix-tree/pom.xml @@ -33,25 +33,6 @@ - - maven-compiler-plugin - - - default-compile - - ${java.default.compiler} - true - - - - default-testCompile - - ${java.default.compiler} - true - - - - org.apache.maven.plugins maven-site-plugin @@ -90,6 +71,36 @@ + + + + + org.eclipse.m2e + lifecycle-mapping + 1.0.0 + + + + + + org.apache.maven.plugins + maven-compiler-plugin + [3.2,) + + compile + + + + + + + + + + + + diff --git a/hbase-prefix-tree/src/main/asciidoc/.gitignore b/hbase-prefix-tree/src/main/asciidoc/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java index b95055ccb8f..d9852befacb 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java +++ 
b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java @@ -140,35 +140,6 @@ public class PrefixTreeSeeker implements EncodedSeeker { private static final boolean USE_POSITION_BEFORE = false; - /** - * Seek forward only (should be called reseekToKeyInBlock?). - *

- * If the exact key is found look at the seekBefore variable and:
- * - if true: go to the previous key if it's true
- * - if false: stay on the exact key
- *
- * If the exact key is not found, then go to the previous key *if possible*, but remember to
- * leave the scanner in a valid state if possible.
- *
- * @param keyOnlyBytes KeyValue format of a Cell's key at which to position the seeker
- * @param offset offset into the keyOnlyBytes array
- * @param length number of bytes of the keyOnlyBytes array to use
- * @param forceBeforeOnExactMatch if an exact match is found and seekBefore=true, back up 1 Cell
- * @return 0 if the seeker is on the exact key
      - * 1 if the seeker is not on the key for any reason, including seekBefore being true - */ - @Override - public int seekToKeyInBlock(byte[] keyOnlyBytes, int offset, int length, - boolean forceBeforeOnExactMatch) { - if (USE_POSITION_BEFORE) { - return seekToOrBeforeUsingPositionAtOrBefore(keyOnlyBytes, offset, length, - forceBeforeOnExactMatch); - } else { - return seekToOrBeforeUsingPositionAtOrAfter(keyOnlyBytes, offset, length, - forceBeforeOnExactMatch); - } - } - /* * Support both of these options since the underlying PrefixTree supports both. Possibly * expand the EncodedSeeker to utilize them both. diff --git a/hbase-procedure/pom.xml b/hbase-procedure/pom.xml new file mode 100644 index 00000000000..9683db2eeb1 --- /dev/null +++ b/hbase-procedure/pom.xml @@ -0,0 +1,181 @@ + + + + 4.0.0 + + hbase + org.apache.hbase + 2.0.0-SNAPSHOT + .. + + + hbase-procedure + HBase - Procedure + Procedure Framework + + + + + org.apache.maven.plugins + maven-site-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + ${maven.assembly.version} + + true + + + + maven-surefire-plugin + + + + secondPartTestsExecution + test + + test + + + true + + + + + + + + + + org.apache.hbase + hbase-common + ${project.version} + tests + + + org.apache.hbase + hbase-annotations + test-jar + test + + + org.apache.hbase + hbase-protocol + + + org.apache.hbase + hbase-common + + + com.google.guava + guava + + + commons-logging + commons-logging + + + + + + + hadoop-1.1 + + + + hadoop.profile1.1 + + + + + org.apache.hadoop + hadoop-core + + + + + hadoop-1.0 + + + hadoop.profile + 1.0 + + + + + org.apache.hadoop + hadoop-core + + + + + + hadoop-2.0 + + + + !hadoop.profile + + + + + org.apache.hadoop + hadoop-common + + + + + + hadoop-3.0 + + + hadoop.profile + 3.0 + + + + 3.0-SNAPSHOT + + + + org.apache.hadoop + hadoop-common + + + + + diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java new file mode 100644 index 00000000000..1c3be2dec54 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class OnePhaseProcedure extends Procedure { + // TODO (e.g. 
used by online snapshots) +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java new file mode 100644 index 00000000000..338fcad4c94 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java @@ -0,0 +1,680 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.lang.reflect.Constructor; +import java.lang.reflect.Modifier; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeoutException; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.procedure2.util.StringUtils; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState; +import org.apache.hadoop.hbase.util.ByteStringer; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; + +/** + * Base Procedure class responsible for handling the procedure metadata, + * e.g. state, startTime, lastUpdate, stack-indexes, ... + * + * execute() is called each time the procedure is executed. + * It may be called multiple times in case of failure and restart, so the + * code must be idempotent. + * The return value is a set of sub-procedures, or null if the procedure + * doesn't have sub-procedures. Once the sub-procedures are successfully completed, + * the execute() method is called again; you should think of it as a stack: + * -> step 1 + * ---> step 2 + * -> step 1 + * + * rollback() is called when the procedure or one of its sub-procedures fails. + * The rollback step is supposed to clean up the resources created during the + * execute() step. In case of failure and restart, rollback() may be called + * multiple times, so the code must be idempotent.
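To make the contract above concrete, here is a hedged, minimal sketch of a do-nothing Procedure (not part of this patch; the environment type MyEnv is a hypothetical stand-in):

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

// A procedure with a single idempotent step, no sub-procedures and no persisted state.
public class NoopProcedure extends Procedure<MyEnv> {
  @Override
  protected Procedure[] execute(MyEnv env) throws ProcedureYieldException {
    // Idempotent work goes here; returning null means "no sub-procedures, nothing left to do".
    return null;
  }

  @Override
  protected void rollback(MyEnv env) throws IOException {
    // Undo whatever execute() created; must tolerate being called more than once.
  }

  @Override
  protected boolean abort(MyEnv env) {
    return false; // this procedure is not abortable
  }

  @Override
  protected void serializeStateData(OutputStream stream) throws IOException {
    // nothing to persist for this example
  }

  @Override
  protected void deserializeStateData(InputStream stream) throws IOException {
    // nothing to restore
  }
}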
+ */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public abstract class Procedure implements Comparable { + // unchanged after initialization + private String owner = null; + private Long parentProcId = null; + private Long procId = null; + private long startTime; + + // runtime state, updated every operation + private ProcedureState state = ProcedureState.INITIALIZING; + private Integer timeout = null; + private int[] stackIndexes = null; + private int childrenLatch = 0; + private long lastUpdate; + + private RemoteProcedureException exception = null; + private byte[] result = null; + + /** + * The main code of the procedure. It must be idempotent since execute() + * may be called multiple time in case of machine failure in the middle + * of the execution. + * @return a set of sub-procedures or null if there is nothing else to execute. + */ + protected abstract Procedure[] execute(TEnvironment env) + throws ProcedureYieldException; + + /** + * The code to undo what done by the execute() code. + * It is called when the procedure or one of the sub-procedure failed or an + * abort was requested. It should cleanup all the resources created by + * the execute() call. The implementation must be idempotent since rollback() + * may be called multiple time in case of machine failure in the middle + * of the execution. + * @throws IOException temporary failure, the rollback will retry later + */ + protected abstract void rollback(TEnvironment env) + throws IOException; + + /** + * The abort() call is asynchronous and each procedure must decide how to deal + * with that, if they want to be abortable. The simplest implementation + * is to have an AtomicBoolean set in the abort() method and then the execute() + * will check if the abort flag is set or not. + * abort() may be called multiple times from the client, so the implementation + * must be idempotent. + * + * NOTE: abort() is not like Thread.interrupt() it is just a notification + * that allows the procedure implementor where to abort to avoid leak and + * have a better control on what was executed and what not. + */ + protected abstract boolean abort(TEnvironment env); + + /** + * The user-level code of the procedure may have some state to + * persist (e.g. input arguments) to be able to resume on failure. + * @param stream the stream that will contain the user serialized data + */ + protected abstract void serializeStateData(final OutputStream stream) + throws IOException; + + /** + * Called on store load to allow the user to decode the previously serialized + * state. + * @param stream the stream that contains the user serialized data + */ + protected abstract void deserializeStateData(final InputStream stream) + throws IOException; + + /** + * The user should override this method, and try to take a lock if necessary. + * A lock can be anything, and it is up to the implementor. + * Example: in our Master we can execute request in parallel for different tables + * create t1 and create t2 can be executed at the same time. + * anything else on t1/t2 is queued waiting that specific table create to happen. + * + * @return true if the lock was acquired and false otherwise + */ + protected boolean acquireLock(final TEnvironment env) { + return true; + } + + /** + * The user should override this method, and release lock if necessary. + */ + protected void releaseLock(final TEnvironment env) { + // no-op + } + + /** + * Called when the procedure is loaded for replay. 
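As the acquireLock()/releaseLock() description above suggests, an implementation could key the lock on a table name; a sketch under stated assumptions (MyEnv, getTableLockManager(), tryExclusiveLock()/releaseExclusiveLock() and the tableName field are all hypothetical, not part of this patch):

@Override
protected boolean acquireLock(MyEnv env) {
  // Returning false makes the executor yield this procedure and retry it later.
  return env.getTableLockManager().tryExclusiveLock(tableName);
}

@Override
protected void releaseLock(MyEnv env) {
  env.getTableLockManager().releaseExclusiveLock(tableName);
}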
+ * The procedure implementor may use this method to perform some quick + * operation before replay. + * e.g. failing the procedure if the state on replay may be unknown. + */ + protected void beforeReplay(final TEnvironment env) { + // no-op + } + + /** + * Called when the procedure is marked as completed (success or rollback). + * The procedure implementor may use this method to cleanup in-memory states. + * This operation will not be retried on failure. + */ + protected void completionCleanup(final TEnvironment env) { + // no-op + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + toStringClassDetails(sb); + + if (procId != null) { + sb.append(" id="); + sb.append(getProcId()); + } + + if (hasParent()) { + sb.append(" parent="); + sb.append(getParentProcId()); + } + + if (hasOwner()) { + sb.append(" owner="); + sb.append(getOwner()); + } + + sb.append(" state="); + sb.append(getState()); + return sb.toString(); + } + + /** + * Extend the toString() information with the procedure details + * e.g. className and parameters + * @param builder the string builder to use to append the proc specific information + */ + protected void toStringClassDetails(StringBuilder builder) { + builder.append(getClass().getName()); + } + + /** + * @return the serialized result if any, otherwise null + */ + public byte[] getResult() { + return result; + } + + /** + * The procedure may leave a "result" on completion. + * @param result the serialized result that will be passed to the client + */ + protected void setResult(final byte[] result) { + this.result = result; + } + + public long getProcId() { + return procId; + } + + public boolean hasParent() { + return parentProcId != null; + } + + public boolean hasException() { + return exception != null; + } + + public boolean hasTimeout() { + return timeout != null; + } + + public long getParentProcId() { + return parentProcId; + } + + /** + * @return true if the procedure has failed. + * true may mean failed but not yet rolledback or failed and rolledback. + */ + public synchronized boolean isFailed() { + return exception != null || state == ProcedureState.ROLLEDBACK; + } + + /** + * @return true if the procedure is finished successfully. + */ + public synchronized boolean isSuccess() { + return state == ProcedureState.FINISHED && exception == null; + } + + /** + * @return true if the procedure is finished. The Procedure may be completed + * successfuly or failed and rolledback. + */ + public synchronized boolean isFinished() { + switch (state) { + case ROLLEDBACK: + return true; + case FINISHED: + return exception == null; + default: + break; + } + return false; + } + + /** + * @return true if the procedure is waiting for a child to finish or for an external event. 
+ */ + public synchronized boolean isWaiting() { + switch (state) { + case WAITING: + case WAITING_TIMEOUT: + return true; + default: + break; + } + return false; + } + + public synchronized RemoteProcedureException getException() { + return exception; + } + + public long getStartTime() { + return startTime; + } + + public synchronized long getLastUpdate() { + return lastUpdate; + } + + public synchronized long elapsedTime() { + return lastUpdate - startTime; + } + + /** + * @param timeout timeout in msec + */ + protected void setTimeout(final int timeout) { + this.timeout = timeout; + } + + /** + * @return the timeout in msec + */ + public int getTimeout() { + return timeout; + } + + /** + * @return the remaining time before the timeout + */ + public long getTimeRemaining() { + return Math.max(0, timeout - (EnvironmentEdgeManager.currentTime() - startTime)); + } + + protected void setOwner(final String owner) { + this.owner = StringUtils.isEmpty(owner) ? null : owner; + } + + public String getOwner() { + return owner; + } + + public boolean hasOwner() { + return owner != null; + } + + @VisibleForTesting + @InterfaceAudience.Private + protected synchronized void setState(final ProcedureState state) { + this.state = state; + updateTimestamp(); + } + + @InterfaceAudience.Private + protected synchronized ProcedureState getState() { + return state; + } + + protected void setFailure(final String source, final Throwable cause) { + setFailure(new RemoteProcedureException(source, cause)); + } + + protected synchronized void setFailure(final RemoteProcedureException exception) { + this.exception = exception; + if (!isFinished()) { + setState(ProcedureState.FINISHED); + } + } + + protected void setAbortFailure(final String source, final String msg) { + setFailure(source, new ProcedureAbortedException(msg)); + } + + @InterfaceAudience.Private + protected synchronized boolean setTimeoutFailure() { + if (state == ProcedureState.WAITING_TIMEOUT) { + long timeDiff = EnvironmentEdgeManager.currentTime() - lastUpdate; + setFailure("ProcedureExecutor", new TimeoutException( + "Operation timed out after " + StringUtils.humanTimeDiff(timeDiff))); + return true; + } + return false; + } + + /** + * Called by the ProcedureExecutor to assign the ID to the newly created procedure. + */ + @VisibleForTesting + @InterfaceAudience.Private + protected void setProcId(final long procId) { + this.procId = procId; + this.startTime = EnvironmentEdgeManager.currentTime(); + setState(ProcedureState.RUNNABLE); + } + + /** + * Called by the ProcedureExecutor to assign the parent to the newly created procedure. + */ + @InterfaceAudience.Private + protected void setParentProcId(final long parentProcId) { + this.parentProcId = parentProcId; + } + + /** + * Internal method called by the ProcedureExecutor that starts the + * user-level code execute(). + */ + @InterfaceAudience.Private + protected Procedure[] doExecute(final TEnvironment env) + throws ProcedureYieldException { + try { + updateTimestamp(); + return execute(env); + } finally { + updateTimestamp(); + } + } + + /** + * Internal method called by the ProcedureExecutor that starts the + * user-level code rollback(). + */ + @InterfaceAudience.Private + protected void doRollback(final TEnvironment env) throws IOException { + try { + updateTimestamp(); + rollback(env); + } finally { + updateTimestamp(); + } + } + + /** + * Called on store load to initialize the Procedure internals after + * the creation/deserialization. 
+ */ + @InterfaceAudience.Private + protected void setStartTime(final long startTime) { + this.startTime = startTime; + } + + /** + * Called on store load to initialize the Procedure internals after + * the creation/deserialization. + */ + private synchronized void setLastUpdate(final long lastUpdate) { + this.lastUpdate = lastUpdate; + } + + protected synchronized void updateTimestamp() { + this.lastUpdate = EnvironmentEdgeManager.currentTime(); + } + + /** + * Called by the ProcedureExecutor on procedure-load to restore the latch state + */ + @InterfaceAudience.Private + protected synchronized void setChildrenLatch(final int numChildren) { + this.childrenLatch = numChildren; + } + + /** + * Called by the ProcedureExecutor on procedure-load to restore the latch state + */ + @InterfaceAudience.Private + protected synchronized void incChildrenLatch() { + // TODO: can this be inferred from the stack? I think so... + this.childrenLatch++; + } + + /** + * Called by the ProcedureExecutor to notify that one of the sub-procedures + * has completed. + */ + @InterfaceAudience.Private + protected synchronized boolean childrenCountDown() { + assert childrenLatch > 0; + return --childrenLatch == 0; + } + + /** + * Called by the RootProcedureState on procedure execution. + * Each procedure store its stack-index positions. + */ + @InterfaceAudience.Private + protected synchronized void addStackIndex(final int index) { + if (stackIndexes == null) { + stackIndexes = new int[] { index }; + } else { + int count = stackIndexes.length; + stackIndexes = Arrays.copyOf(stackIndexes, count + 1); + stackIndexes[count] = index; + } + } + + @InterfaceAudience.Private + protected synchronized boolean removeStackIndex() { + if (stackIndexes.length > 1) { + stackIndexes = Arrays.copyOf(stackIndexes, stackIndexes.length - 1); + return false; + } else { + stackIndexes = null; + return true; + } + } + + /** + * Called on store load to initialize the Procedure internals after + * the creation/deserialization. + */ + @InterfaceAudience.Private + protected synchronized void setStackIndexes(final List stackIndexes) { + this.stackIndexes = new int[stackIndexes.size()]; + for (int i = 0; i < this.stackIndexes.length; ++i) { + this.stackIndexes[i] = stackIndexes.get(i); + } + } + + @InterfaceAudience.Private + protected synchronized boolean wasExecuted() { + return stackIndexes != null; + } + + @InterfaceAudience.Private + protected synchronized int[] getStackIndexes() { + return stackIndexes; + } + + @Override + public int compareTo(final Procedure other) { + long diff = getProcId() - other.getProcId(); + return (diff < 0) ? -1 : (diff > 0) ? 1 : 0; + } + + /* + * Helper to lookup the root Procedure ID given a specified procedure. 
+ */ + @InterfaceAudience.Private + protected static Long getRootProcedureId(final Map procedures, Procedure proc) { + while (proc.hasParent()) { + proc = procedures.get(proc.getParentProcId()); + if (proc == null) return null; + } + return proc.getProcId(); + } + + protected static Procedure newInstance(final String className) throws IOException { + try { + Class clazz = Class.forName(className); + if (!Modifier.isPublic(clazz.getModifiers())) { + throw new Exception("the " + clazz + " class is not public"); + } + + Constructor ctor = clazz.getConstructor(); + assert ctor != null : "no constructor found"; + if (!Modifier.isPublic(ctor.getModifiers())) { + throw new Exception("the " + clazz + " constructor is not public"); + } + return (Procedure)ctor.newInstance(); + } catch (Exception e) { + throw new IOException("The procedure class " + className + + " must be accessible and have an empty constructor", e); + } + } + + protected static void validateClass(final Procedure proc) throws IOException { + try { + Class clazz = proc.getClass(); + if (!Modifier.isPublic(clazz.getModifiers())) { + throw new Exception("the " + clazz + " class is not public"); + } + + Constructor ctor = clazz.getConstructor(); + assert ctor != null; + if (!Modifier.isPublic(ctor.getModifiers())) { + throw new Exception("the " + clazz + " constructor is not public"); + } + } catch (Exception e) { + throw new IOException("The procedure class " + proc.getClass().getName() + + " must be accessible and have an empty constructor", e); + } + } + + /** + * Helper to convert the procedure to protobuf. + * Used by ProcedureStore implementations. + */ + @InterfaceAudience.Private + public static ProcedureProtos.Procedure convert(final Procedure proc) + throws IOException { + Preconditions.checkArgument(proc != null); + validateClass(proc); + + ProcedureProtos.Procedure.Builder builder = ProcedureProtos.Procedure.newBuilder() + .setClassName(proc.getClass().getName()) + .setProcId(proc.getProcId()) + .setState(proc.getState()) + .setStartTime(proc.getStartTime()) + .setLastUpdate(proc.getLastUpdate()); + + if (proc.hasParent()) { + builder.setParentId(proc.getParentProcId()); + } + + if (proc.hasTimeout()) { + builder.setTimeout(proc.getTimeout()); + } + + if (proc.hasOwner()) { + builder.setOwner(proc.getOwner()); + } + + int[] stackIds = proc.getStackIndexes(); + if (stackIds != null) { + for (int i = 0; i < stackIds.length; ++i) { + builder.addStackId(stackIds[i]); + } + } + + if (proc.hasException()) { + RemoteProcedureException exception = proc.getException(); + builder.setException( + RemoteProcedureException.toProto(exception.getSource(), exception.getCause())); + } + + byte[] result = proc.getResult(); + if (result != null) { + builder.setResult(ByteStringer.wrap(result)); + } + + ByteString.Output stateStream = ByteString.newOutput(); + proc.serializeStateData(stateStream); + if (stateStream.size() > 0) { + builder.setStateData(stateStream.toByteString()); + } + + return builder.build(); + } + + /** + * Helper to convert the protobuf procedure. + * Used by ProcedureStore implementations. + * + * TODO: OPTIMIZATION: some of the field never change during the execution + * (e.g. className, procId, parentId, ...). + * We can split in 'data' and 'state', and the store + * may take advantage of it by storing the data only on insert(). 
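A quick sketch of the round trip a ProcedureStore implementation can perform with the two convert() helpers (illustrative; proc stands for any procedure instance being persisted):

// Serialize the procedure metadata plus its user state for writing to the store.
ProcedureProtos.Procedure proto = Procedure.convert(proc);

// ... later, on load/replay, rebuild the procedure from the protobuf message ...
Procedure restored = Procedure.convert(proto);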
+ */ + @InterfaceAudience.Private + public static Procedure convert(final ProcedureProtos.Procedure proto) + throws IOException { + // Procedure from class name + Procedure proc = Procedure.newInstance(proto.getClassName()); + + // set fields + proc.setProcId(proto.getProcId()); + proc.setState(proto.getState()); + proc.setStartTime(proto.getStartTime()); + proc.setLastUpdate(proto.getLastUpdate()); + + if (proto.hasParentId()) { + proc.setParentProcId(proto.getParentId()); + } + + if (proto.hasOwner()) { + proc.setOwner(proto.getOwner()); + } + + if (proto.hasTimeout()) { + proc.setTimeout(proto.getTimeout()); + } + + if (proto.getStackIdCount() > 0) { + proc.setStackIndexes(proto.getStackIdList()); + } + + if (proto.hasException()) { + assert proc.getState() == ProcedureState.FINISHED || + proc.getState() == ProcedureState.ROLLEDBACK : + "The procedure must be failed (waiting to rollback) or rolledback"; + proc.setFailure(RemoteProcedureException.fromProto(proto.getException())); + } + + if (proto.hasResult()) { + proc.setResult(proto.getResult().toByteArray()); + } + + // we want to call deserialize even when the stream is empty, mainly for testing. + proc.deserializeStateData(proto.getStateData().newInput()); + + return proc; + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java similarity index 57% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java rename to hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java index 6970333557f..2e409cf8d8c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableFactory.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java @@ -1,5 +1,4 @@ /** - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,36 +15,28 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hbase.client; -import java.io.IOException; +package org.apache.hadoop.hbase.procedure2; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; /** - * Factory for creating HTable instances. - * - * @deprecated as of 0.98.1. See {@link HConnectionManager#createConnection(Configuration)}. 
+ * Thrown when a procedure is aborted */ @InterfaceAudience.Public @InterfaceStability.Stable -@Deprecated -public class HTableFactory implements HTableInterfaceFactory { - @Override - public HTableInterface createHTableInterface(Configuration config, - byte[] tableName) { - try { - return new HTable(config, TableName.valueOf(tableName)); - } catch (IOException ioe) { - throw new RuntimeException(ioe); - } +public class ProcedureAbortedException extends ProcedureException { + /** default constructor */ + public ProcedureAbortedException() { + super(); } - @Override - public void releaseHTableInterface(HTableInterface table) throws IOException { - table.close(); + /** + * Constructor + * @param s message + */ + public ProcedureAbortedException(String s) { + super(s); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java similarity index 59% rename from hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java rename to hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java index b6349c24ca6..9f922b1b933 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterfaceFactory.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java @@ -1,5 +1,4 @@ /** - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,39 +15,31 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hbase.client; + +package org.apache.hadoop.hbase.procedure2; import java.io.IOException; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; - -/** - * Defines methods to create new HTableInterface. - * - * @since 0.21.0 - * @deprecated in favor of {@link ConnectionFactory} and {@link Connection}. - */ -@Deprecated @InterfaceAudience.Public @InterfaceStability.Stable -public interface HTableInterfaceFactory { +public class ProcedureException extends IOException { + /** default constructor */ + public ProcedureException() { + super(); + } /** - * Creates a new HTableInterface. - * - * @param config HBaseConfiguration instance. - * @param tableName name of the HBase table. - * @return HTableInterface instance. + * Constructor + * @param s message */ - HTableInterface createHTableInterface(Configuration config, byte[] tableName); + public ProcedureException(String s) { + super(s); + } - - /** - * Release the HTable resource represented by the table. - * @param table - */ - void releaseHTableInterface(final HTableInterface table) throws IOException; + public ProcedureException(Throwable t) { + super(t); + } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java new file mode 100644 index 00000000000..29820580fd8 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -0,0 +1,1077 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.HashSet; +import java.util.TreeSet; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; +import org.apache.hadoop.hbase.procedure2.util.StringUtils; +import org.apache.hadoop.hbase.procedure2.util.TimeoutBlockingQueue; +import org.apache.hadoop.hbase.procedure2.util.TimeoutBlockingQueue.TimeoutRetriever; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Pair; + +import com.google.common.base.Preconditions; + +/** + * Thread Pool that executes the submitted procedures. + * The executor has a ProcedureStore associated. + * Each operation is logged and on restart the pending procedures are resumed. + * + * Unless the Procedure code throws an error (e.g. invalid user input) + * the procedure will complete (at some point in time), On restart the pending + * procedures are resumed and the once failed will be rolledback. 
+ * + * The user can add procedures to the executor via submitProcedure(proc) + * check for the finished state via isFinished(procId) + * and get the result via getResult(procId) + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class ProcedureExecutor { + private static final Log LOG = LogFactory.getLog(ProcedureExecutor.class); + + Testing testing = null; + public static class Testing { + protected boolean killBeforeStoreUpdate = false; + protected boolean toggleKillBeforeStoreUpdate = false; + + protected boolean shouldKillBeforeStoreUpdate() { + final boolean kill = this.killBeforeStoreUpdate; + if (this.toggleKillBeforeStoreUpdate) { + this.killBeforeStoreUpdate = !kill; + LOG.warn("Toggle Kill before store update to: " + this.killBeforeStoreUpdate); + } + return kill; + } + } + + public interface ProcedureExecutorListener { + void procedureLoaded(long procId); + void procedureAdded(long procId); + void procedureFinished(long procId); + } + + /** + * Used by the TimeoutBlockingQueue to get the timeout interval of the procedure + */ + private static class ProcedureTimeoutRetriever implements TimeoutRetriever { + @Override + public long getTimeout(Procedure proc) { + return proc.getTimeRemaining(); + } + + @Override + public TimeUnit getTimeUnit(Procedure proc) { + return TimeUnit.MILLISECONDS; + } + } + + /** + * Internal cleaner that removes the completed procedure results after a TTL. + * NOTE: This is a special case handled in timeoutLoop(). + * + * Since the client code looks more or less like: + * procId = master.doOperation() + * while (master.getProcResult(procId) == ProcInProgress); + * The master should not throw away the proc result as soon as the procedure is done + * but should wait a result request from the client (see executor.removeResult(procId)) + * The client will call something like master.isProcDone() or master.getProcResult() + * which will return the result/state to the client, and it will mark the completed + * proc as ready to delete. note that the client may not receive the response from + * the master (e.g. master failover) so, if we delay a bit the real deletion of + * the proc result the client will be able to get the result the next try. 
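Putting the executor API described above together, a client-side sketch of the submit/poll/ack cycle (illustrative, not part of this patch; conf, env, store and NoopProcedure are stand-ins, and checked-exception handling is omitted):

ProcedureExecutor<MyEnv> executor = new ProcedureExecutor<>(conf, env, store);
executor.start(4);                           // 4 worker threads plus the internal timeout thread

long procId = executor.submitProcedure(new NoopProcedure());
while (!executor.isFinished(procId)) {
  Thread.sleep(100);                         // simple poll; a real caller would back off
}
ProcedureResult result = executor.getResult(procId);
// ... consume the result ...
executor.removeResult(procId);               // ACK so the CompletedProcedureCleaner can evict it

executor.stop();
executor.join();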
+ */ + private static class CompletedProcedureCleaner extends Procedure { + private static final Log LOG = LogFactory.getLog(CompletedProcedureCleaner.class); + + private static final String CLEANER_INTERVAL_CONF_KEY = "hbase.procedure.cleaner.interval"; + private static final int DEFAULT_CLEANER_INTERVAL = 30 * 1000; // 30sec + + private static final String EVICT_TTL_CONF_KEY = "hbase.procedure.cleaner.evict.ttl"; + private static final int DEFAULT_EVICT_TTL = 15 * 60000; // 15min + + private static final String EVICT_ACKED_TTL_CONF_KEY ="hbase.procedure.cleaner.acked.evict.ttl"; + private static final int DEFAULT_ACKED_EVICT_TTL = 5 * 60000; // 5min + + private final Map completed; + private final ProcedureStore store; + private final Configuration conf; + + public CompletedProcedureCleaner(final Configuration conf, final ProcedureStore store, + final Map completedMap) { + // set the timeout interval that triggers the periodic-procedure + setTimeout(conf.getInt(CLEANER_INTERVAL_CONF_KEY, DEFAULT_CLEANER_INTERVAL)); + this.completed = completedMap; + this.store = store; + this.conf = conf; + } + + public void periodicExecute(final TEnvironment env) { + if (completed.isEmpty()) { + LOG.debug("no completed procedures to cleanup"); + return; + } + + final long evictTtl = conf.getInt(EVICT_TTL_CONF_KEY, DEFAULT_EVICT_TTL); + final long evictAckTtl = conf.getInt(EVICT_ACKED_TTL_CONF_KEY, DEFAULT_ACKED_EVICT_TTL); + + long now = EnvironmentEdgeManager.currentTime(); + Iterator> it = completed.entrySet().iterator(); + while (it.hasNext() && store.isRunning()) { + Map.Entry entry = it.next(); + ProcedureResult result = entry.getValue(); + + // TODO: Select TTL based on Procedure type + if ((result.hasClientAckTime() && (now - result.getClientAckTime()) >= evictAckTtl) || + (now - result.getLastUpdate()) >= evictTtl) { + LOG.debug("Evict completed procedure " + entry.getKey()); + store.delete(entry.getKey()); + it.remove(); + } + } + } + + @Override + protected Procedure[] execute(final TEnvironment env) { + throw new UnsupportedOperationException(); + } + + @Override + protected void rollback(final TEnvironment env) { + throw new UnsupportedOperationException(); + } + + @Override + protected boolean abort(final TEnvironment env) { + throw new UnsupportedOperationException(); + } + + @Override + public void serializeStateData(final OutputStream stream) { + throw new UnsupportedOperationException(); + } + + @Override + public void deserializeStateData(final InputStream stream) { + throw new UnsupportedOperationException(); + } + } + + /** + * Map the the procId returned by submitProcedure(), the Root-ProcID, to the ProcedureResult. + * Once a Root-Procedure completes (success or failure), the result will be added to this map. + * The user of ProcedureExecutor should call getResult(procId) to get the result. + */ + private final ConcurrentHashMap completed = + new ConcurrentHashMap(); + + /** + * Map the the procId returned by submitProcedure(), the Root-ProcID, to the RootProcedureState. + * The RootProcedureState contains the execution stack of the Root-Procedure, + * It is added to the map by submitProcedure() and removed on procedure completion. + */ + private final ConcurrentHashMap rollbackStack = + new ConcurrentHashMap(); + + /** + * Helper map to lookup the live procedures by ID. + * This map contains every procedure. root-procedures and subprocedures. 
+ */ + private final ConcurrentHashMap procedures = + new ConcurrentHashMap(); + + /** + * Timeout Queue that contains Procedures in a WAITING_TIMEOUT state + * or periodic procedures. + */ + private final TimeoutBlockingQueue waitingTimeout = + new TimeoutBlockingQueue(new ProcedureTimeoutRetriever()); + + /** + * Queue that contains runnable procedures. + */ + private final ProcedureRunnableSet runnables; + + // TODO + private final ReentrantLock submitLock = new ReentrantLock(); + private final AtomicLong lastProcId = new AtomicLong(-1); + + private final CopyOnWriteArrayList listeners = + new CopyOnWriteArrayList(); + + private final AtomicInteger activeExecutorCount = new AtomicInteger(0); + private final AtomicBoolean running = new AtomicBoolean(false); + private final TEnvironment environment; + private final ProcedureStore store; + private final Configuration conf; + + private Thread[] threads; + + public ProcedureExecutor(final Configuration conf, final TEnvironment environment, + final ProcedureStore store) { + this(conf, environment, store, new ProcedureSimpleRunQueue()); + } + + public ProcedureExecutor(final Configuration conf, final TEnvironment environment, + final ProcedureStore store, final ProcedureRunnableSet runqueue) { + this.environment = environment; + this.runnables = runqueue; + this.store = store; + this.conf = conf; + } + + private List> load() throws IOException { + Preconditions.checkArgument(completed.isEmpty()); + Preconditions.checkArgument(rollbackStack.isEmpty()); + Preconditions.checkArgument(procedures.isEmpty()); + Preconditions.checkArgument(waitingTimeout.isEmpty()); + Preconditions.checkArgument(runnables.size() == 0); + + // 1. Load the procedures + Iterator loader = store.load(); + if (loader == null) { + lastProcId.set(0); + return null; + } + + long logMaxProcId = 0; + int runnablesCount = 0; + while (loader.hasNext()) { + Procedure proc = loader.next(); + proc.beforeReplay(getEnvironment()); + procedures.put(proc.getProcId(), proc); + logMaxProcId = Math.max(logMaxProcId, proc.getProcId()); + LOG.debug("Loading procedure state=" + proc.getState() + + " isFailed=" + proc.hasException() + ": " + proc); + if (!proc.hasParent() && !proc.isFinished()) { + rollbackStack.put(proc.getProcId(), new RootProcedureState()); + } + if (proc.getState() == ProcedureState.RUNNABLE) { + runnablesCount++; + } + } + assert lastProcId.get() < 0; + lastProcId.set(logMaxProcId); + + // 2. Initialize the stacks + TreeSet runnableSet = null; + HashSet waitingSet = null; + for (final Procedure proc: procedures.values()) { + Long rootProcId = getRootProcedureId(proc); + if (rootProcId == null) { + // The 'proc' was ready to run but the root procedure was rolledback? 
+ runnables.addBack(proc); + continue; + } + + if (!proc.hasParent() && proc.isFinished()) { + LOG.debug("The procedure is completed state=" + proc.getState() + + " isFailed=" + proc.hasException() + ": " + proc); + assert !rollbackStack.containsKey(proc.getProcId()); + completed.put(proc.getProcId(), newResultFromProcedure(proc)); + continue; + } + + if (proc.hasParent() && !proc.isFinished()) { + Procedure parent = procedures.get(proc.getParentProcId()); + // corrupted procedures are handled later at step 3 + if (parent != null) { + parent.incChildrenLatch(); + } + } + + RootProcedureState procStack = rollbackStack.get(rootProcId); + procStack.loadStack(proc); + + switch (proc.getState()) { + case RUNNABLE: + if (runnableSet == null) { + runnableSet = new TreeSet(); + } + runnableSet.add(proc); + break; + case WAITING_TIMEOUT: + if (waitingSet == null) { + waitingSet = new HashSet(); + } + waitingSet.add(proc); + break; + case FINISHED: + if (proc.hasException()) { + // add the proc to the runnables to perform the rollback + runnables.addBack(proc); + break; + } + case ROLLEDBACK: + case INITIALIZING: + String msg = "Unexpected " + proc.getState() + " state for " + proc; + LOG.error(msg); + throw new UnsupportedOperationException(msg); + default: + break; + } + } + + // 3. Validate the stacks + List> corrupted = null; + Iterator> itStack = rollbackStack.entrySet().iterator(); + while (itStack.hasNext()) { + Map.Entry entry = itStack.next(); + RootProcedureState procStack = entry.getValue(); + if (procStack.isValid()) continue; + + for (Procedure proc: procStack.getSubprocedures()) { + procedures.remove(proc.getProcId()); + if (runnableSet != null) runnableSet.remove(proc); + if (waitingSet != null) waitingSet.remove(proc); + } + itStack.remove(); + if (corrupted == null) { + corrupted = new ArrayList>(); + } + corrupted.add(entry); + } + + // 4. Push the runnables + if (runnableSet != null) { + // TODO: See ProcedureWALFormatReader.readInitEntry() some procedure + // may be started way before this stuff. + for (Procedure proc: runnableSet) { + if (!proc.hasParent()) { + sendProcedureLoadedNotification(proc.getProcId()); + } + runnables.addBack(proc); + } + } + return corrupted; + } + + public void start(int numThreads) throws IOException { + if (running.getAndSet(true)) { + LOG.warn("Already running"); + return; + } + + // We have numThreads executor + one timer thread used for timing out + // procedures and triggering periodic procedures. + threads = new Thread[numThreads + 1]; + LOG.info("Starting procedure executor threads=" + threads.length); + + // Initialize procedures executor + for (int i = 0; i < numThreads; ++i) { + threads[i] = new Thread("ProcedureExecutorThread-" + i) { + @Override + public void run() { + execLoop(); + } + }; + } + + // Initialize procedures timeout handler (this is the +1 thread) + threads[numThreads] = new Thread("ProcedureExecutorTimeoutThread") { + @Override + public void run() { + timeoutLoop(); + } + }; + + // Acquire the store lease. + store.recoverLease(); + + // TODO: Split in two steps. + // TODO: Handle corrupted procedure returned (probably just a WARN) + // The first one will make sure that we have the latest id, + // so we can start the threads and accept new procedures. + // The second step will do the actual load of old procedures. + load(); + + // Start the executors. Here we must have the lastProcId set. 
+ for (int i = 0; i < threads.length; ++i) { + threads[i].start(); + } + + // Add completed cleaner + waitingTimeout.add(new CompletedProcedureCleaner(conf, store, completed)); + } + + public void stop() { + if (!running.getAndSet(false)) { + return; + } + + LOG.info("Stopping the procedure executor"); + runnables.signalAll(); + waitingTimeout.signalAll(); + } + + public void join() { + boolean interrupted = false; + + for (int i = 0; i < threads.length; ++i) { + try { + threads[i].join(); + } catch (InterruptedException ex) { + interrupted = true; + } + } + + if (interrupted) { + Thread.currentThread().interrupt(); + } + + completed.clear(); + rollbackStack.clear(); + procedures.clear(); + waitingTimeout.clear(); + runnables.clear(); + lastProcId.set(-1); + } + + public boolean isRunning() { + return running.get(); + } + + /** + * @return the number of execution threads. + */ + public int getNumThreads() { + return threads == null ? 0 : (threads.length - 1); + } + + public int getActiveExecutorCount() { + return activeExecutorCount.get(); + } + + public TEnvironment getEnvironment() { + return this.environment; + } + + public ProcedureStore getStore() { + return this.store; + } + + public void registerListener(ProcedureExecutorListener listener) { + this.listeners.add(listener); + } + + public boolean unregisterListener(ProcedureExecutorListener listener) { + return this.listeners.remove(listener); + } + + /** + * Add a new root-procedure to the executor. + * @param proc the new procedure to execute. + * @return the procedure id, that can be used to monitor the operation + */ + public long submitProcedure(final Procedure proc) { + Preconditions.checkArgument(proc.getState() == ProcedureState.INITIALIZING); + Preconditions.checkArgument(isRunning()); + Preconditions.checkArgument(lastProcId.get() >= 0); + Preconditions.checkArgument(!proc.hasParent()); + + // Initialize the Procedure ID + proc.setProcId(nextProcId()); + + // Commit the transaction + store.insert(proc, null); + LOG.debug("procedure " + proc + " added to the store"); + + // Create the rollback stack for the procedure + RootProcedureState stack = new RootProcedureState(); + rollbackStack.put(proc.getProcId(), stack); + + // Submit the new subprocedures + assert !procedures.containsKey(proc.getProcId()); + procedures.put(proc.getProcId(), proc); + sendProcedureAddedNotification(proc.getProcId()); + runnables.addBack(proc); + return proc.getProcId(); + } + + public ProcedureResult getResult(final long procId) { + return completed.get(procId); + } + + /** + * Return true if the procedure is finished. + * The state may be "completed successfully" or "failed and rolledback". + * Use getResult() to check the state or get the result data. + * @param procId the ID of the procedure to check + * @return true if the procedure execution is finished, otherwise false. + */ + public boolean isFinished(final long procId) { + return completed.containsKey(procId); + } + + /** + * Return true if the procedure is started. + * @param procId the ID of the procedure to check + * @return true if the procedure execution is started, otherwise false. + */ + public boolean isStarted(final long procId) { + Procedure proc = procedures.get(procId); + if (proc == null) { + return completed.get(procId) != null; + } + return proc.wasExecuted(); + } + + /** + * Mark the specified completed procedure, as ready to remove. 
+ * @param procId the ID of the procedure to remove + */ + public void removeResult(final long procId) { + ProcedureResult result = completed.get(procId); + if (result == null) { + assert !procedures.containsKey(procId) : "procId=" + procId + " is still running"; + LOG.debug("Procedure procId=" + procId + " already removed by the cleaner"); + return; + } + + // The CompletedProcedureCleaner will take care of deletion, once the TTL is expired. + result.setClientAckTime(EnvironmentEdgeManager.currentTime()); + } + + /** + * Send an abort notification the specified procedure. + * Depending on the procedure implementation the abort can be considered or ignored. + * @param procId the procedure to abort + * @return true if the procedure exist and has received the abort, otherwise false. + */ + public boolean abort(final long procId) { + Procedure proc = procedures.get(procId); + if (proc != null) { + return proc.abort(getEnvironment()); + } + return false; + } + + public Map getResults() { + return Collections.unmodifiableMap(completed); + } + + public Procedure getProcedure(final long procId) { + return procedures.get(procId); + } + + protected ProcedureRunnableSet getRunnableSet() { + return runnables; + } + + /** + * Execution loop (N threads) + * while the executor is in a running state, + * fetch a procedure from the runnables queue and start the execution. + */ + private void execLoop() { + while (isRunning()) { + Long procId = runnables.poll(); + Procedure proc = procId != null ? procedures.get(procId) : null; + if (proc == null) continue; + + try { + activeExecutorCount.incrementAndGet(); + execLoop(proc); + } finally { + activeExecutorCount.decrementAndGet(); + } + } + } + + private void execLoop(Procedure proc) { + if (LOG.isTraceEnabled()) { + LOG.trace("trying to start the execution of " + proc); + } + + Long rootProcId = getRootProcedureId(proc); + if (rootProcId == null) { + // The 'proc' was ready to run but the root procedure was rolledback + executeRollback(proc); + return; + } + + RootProcedureState procStack = rollbackStack.get(rootProcId); + if (procStack == null) return; + + do { + // Try to acquire the execution + if (!procStack.acquire(proc)) { + if (procStack.setRollback()) { + // we have the 'rollback-lock' we can start rollingback + if (!executeRollback(rootProcId, procStack)) { + procStack.unsetRollback(); + runnables.yield(proc); + } + } else { + // if we can't rollback means that some child is still running. + // the rollback will be executed after all the children are done. + // If the procedure was never executed, remove and mark it as rolledback. + if (!proc.wasExecuted()) { + if (!executeRollback(proc)) { + runnables.yield(proc); + } + } + } + break; + } + + // Execute the procedure + assert proc.getState() == ProcedureState.RUNNABLE; + if (proc.acquireLock(getEnvironment())) { + execProcedure(procStack, proc); + proc.releaseLock(getEnvironment()); + } else { + runnables.yield(proc); + } + procStack.release(proc); + + // allows to kill the executor before something is stored to the wal. + // useful to test the procedure recovery. 
+ if (testing != null && !isRunning()) { + break; + } + + if (proc.getProcId() == rootProcId && proc.isSuccess()) { + // Finalize the procedure state + LOG.info("Procedure completed in " + + StringUtils.humanTimeDiff(proc.elapsedTime()) + ": " + proc); + procedureFinished(proc); + break; + } + } while (procStack.isFailed()); + } + + private void timeoutLoop() { + while (isRunning()) { + Procedure proc = waitingTimeout.poll(); + if (proc == null) continue; + + if (proc.getTimeRemaining() > 100) { + // got an early wake, maybe a stop? + // re-enqueue the task in case was not a stop or just a signal + waitingTimeout.add(proc); + continue; + } + + // ---------------------------------------------------------------------------- + // TODO-MAYBE: Should we provide a notification to the store with the + // full set of procedures pending and completed to write a compacted + // version of the log (in case is a log)? + // In theory no, procedures are have a short life, so at some point the store + // will have the tracker saying everything is in the last log. + // ---------------------------------------------------------------------------- + + // The CompletedProcedureCleaner is a special case, and it acts as a chore. + // instead of bringing the Chore class in, we reuse this timeout thread for + // this special case. + if (proc instanceof CompletedProcedureCleaner) { + try { + ((CompletedProcedureCleaner)proc).periodicExecute(getEnvironment()); + } catch (Throwable e) { + LOG.error("ignoring CompletedProcedureCleaner exception: " + e.getMessage(), e); + } + proc.setStartTime(EnvironmentEdgeManager.currentTime()); + waitingTimeout.add(proc); + continue; + } + + // The procedure received an "abort-timeout", call abort() and + // add the procedure back in the queue for rollback. + if (proc.setTimeoutFailure()) { + long rootProcId = Procedure.getRootProcedureId(procedures, proc); + RootProcedureState procStack = rollbackStack.get(rootProcId); + procStack.abort(); + store.update(proc); + runnables.addFront(proc); + continue; + } + } + } + + /** + * Execute the rollback of the full procedure stack. + * Once the procedure is rolledback, the root-procedure will be visible as + * finished to user, and the result will be the fatal exception. + */ + private boolean executeRollback(final long rootProcId, final RootProcedureState procStack) { + Procedure rootProc = procedures.get(rootProcId); + RemoteProcedureException exception = rootProc.getException(); + if (exception == null) { + exception = procStack.getException(); + rootProc.setFailure(exception); + store.update(rootProc); + } + + List subprocStack = procStack.getSubprocedures(); + assert subprocStack != null : "called rollback with no steps executed rootProc=" + rootProc; + + int stackTail = subprocStack.size(); + boolean reuseLock = false; + while (stackTail --> 0) { + final Procedure proc = subprocStack.get(stackTail); + + if (!reuseLock && !proc.acquireLock(getEnvironment())) { + // can't take a lock on the procedure, add the root-proc back on the + // queue waiting for the lock availability + return false; + } + + boolean abortRollback = !executeRollback(proc); + abortRollback |= !isRunning() || !store.isRunning(); + + // If the next procedure is the same to this one + // (e.g. 
StateMachineProcedure reuse the same instance) + // we can avoid to lock/unlock each step + reuseLock = stackTail > 0 && (subprocStack.get(stackTail - 1) == proc) && !abortRollback; + if (!reuseLock) { + proc.releaseLock(getEnvironment()); + } + + // allows to kill the executor before something is stored to the wal. + // useful to test the procedure recovery. + if (abortRollback) { + return false; + } + + subprocStack.remove(stackTail); + } + + // Finalize the procedure state + LOG.info("Rolledback procedure " + rootProc + + " exec-time=" + StringUtils.humanTimeDiff(rootProc.elapsedTime()) + + " exception=" + exception.getMessage()); + procedureFinished(rootProc); + return true; + } + + /** + * Execute the rollback of the procedure step. + * It updates the store with the new state (stack index) + * or will remove completly the procedure in case it is a child. + */ + private boolean executeRollback(final Procedure proc) { + try { + proc.doRollback(getEnvironment()); + } catch (IOException e) { + LOG.debug("rollback attempt failed for " + proc, e); + return false; + } catch (Throwable e) { + // Catch NullPointerExceptions or similar errors... + LOG.fatal("CODE-BUG: uncatched runtime exception for procedure: " + proc, e); + } + + // allows to kill the executor before something is stored to the wal. + // useful to test the procedure recovery. + if (testing != null && testing.shouldKillBeforeStoreUpdate()) { + LOG.debug("TESTING: Kill before store update"); + stop(); + return false; + } + + if (proc.removeStackIndex()) { + proc.setState(ProcedureState.ROLLEDBACK); + if (proc.hasParent()) { + store.delete(proc.getProcId()); + procedures.remove(proc.getProcId()); + } else { + store.update(proc); + } + } else { + store.update(proc); + } + return true; + } + + /** + * Executes the specified procedure + * - calls the doExecute() of the procedure + * - if the procedure execution didn't fail (e.g. invalid user input) + * - ...and returned subprocedures + * - the subprocedures are initialized. + * - the subprocedures are added to the store + * - the subprocedures are added to the runnable queue + * - the procedure is now in a WAITING state, waiting for the subprocedures to complete + * - ...if there are no subprocedure + * - the procedure completed successfully + * - if there is a parent (WAITING) + * - the parent state will be set to RUNNABLE + * - in case of failure + * - the store is updated with the new state + * - the executor (caller of this method) will start the rollback of the procedure + */ + private void execProcedure(final RootProcedureState procStack, final Procedure procedure) { + Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE); + + // Execute the procedure + boolean reExecute = false; + Procedure[] subprocs = null; + do { + reExecute = false; + try { + subprocs = procedure.doExecute(getEnvironment()); + if (subprocs != null && subprocs.length == 0) { + subprocs = null; + } + } catch (ProcedureYieldException e) { + if (LOG.isTraceEnabled()) { + LOG.trace("yield procedure: " + procedure); + } + runnables.yield(procedure); + return; + } catch (Throwable e) { + // Catch NullPointerExceptions or similar errors... 
+ String msg = "CODE-BUG: uncatched runtime exception for procedure: " + procedure; + LOG.error(msg, e); + procedure.setFailure(new RemoteProcedureException(msg, e)); + } + + if (!procedure.isFailed()) { + if (subprocs != null) { + if (subprocs.length == 1 && subprocs[0] == procedure) { + // quick-shortcut for a state machine like procedure + subprocs = null; + reExecute = true; + } else { + // yield the current procedure, and make the subprocedure runnable + for (int i = 0; i < subprocs.length; ++i) { + Procedure subproc = subprocs[i]; + if (subproc == null) { + String msg = "subproc[" + i + "] is null, aborting the procedure"; + procedure.setFailure(new RemoteProcedureException(msg, + new IllegalArgumentException(msg))); + subprocs = null; + break; + } + + assert subproc.getState() == ProcedureState.INITIALIZING; + subproc.setParentProcId(procedure.getProcId()); + subproc.setProcId(nextProcId()); + } + + if (!procedure.isFailed()) { + procedure.setChildrenLatch(subprocs.length); + switch (procedure.getState()) { + case RUNNABLE: + procedure.setState(ProcedureState.WAITING); + break; + case WAITING_TIMEOUT: + waitingTimeout.add(procedure); + break; + default: + break; + } + } + } + } else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) { + waitingTimeout.add(procedure); + } else { + // No subtask, so we are done + procedure.setState(ProcedureState.FINISHED); + } + } + + // Add the procedure to the stack + procStack.addRollbackStep(procedure); + + // allows to kill the executor before something is stored to the wal. + // useful to test the procedure recovery. + if (testing != null && testing.shouldKillBeforeStoreUpdate()) { + LOG.debug("TESTING: Kill before store update"); + stop(); + return; + } + + // Commit the transaction + if (subprocs != null && !procedure.isFailed()) { + if (LOG.isTraceEnabled()) { + LOG.trace("store add " + procedure + " children " + Arrays.toString(subprocs)); + } + store.insert(procedure, subprocs); + } else { + if (LOG.isTraceEnabled()) { + LOG.trace("store update " + procedure); + } + store.update(procedure); + } + + // if the store is not running we are aborting + if (!store.isRunning()) { + return; + } + + assert (reExecute && subprocs == null) || !reExecute; + } while (reExecute); + + // Submit the new subprocedures + if (subprocs != null && !procedure.isFailed()) { + for (int i = 0; i < subprocs.length; ++i) { + Procedure subproc = subprocs[i]; + assert !procedures.containsKey(subproc.getProcId()); + procedures.put(subproc.getProcId(), subproc); + runnables.addFront(subproc); + } + } + + if (procedure.isFinished() && procedure.hasParent()) { + Procedure parent = procedures.get(procedure.getParentProcId()); + if (parent == null) { + assert procStack.isRollingback(); + return; + } + + // If this procedure is the last child awake the parent procedure + if (LOG.isTraceEnabled()) { + LOG.trace(parent + " child is done: " + procedure); + } + if (parent.childrenCountDown() && parent.getState() == ProcedureState.WAITING) { + parent.setState(ProcedureState.RUNNABLE); + store.update(parent); + runnables.addFront(parent); + if (LOG.isTraceEnabled()) { + LOG.trace(parent + " all the children finished their work, resume."); + } + return; + } + } + } + + private void sendProcedureLoadedNotification(final long procId) { + if (!this.listeners.isEmpty()) { + for (ProcedureExecutorListener listener: this.listeners) { + try { + listener.procedureLoaded(procId); + } catch (Throwable e) { + LOG.error("the listener " + listener + " had an error: " + e.getMessage(), 
e); + } + } + } + } + + private void sendProcedureAddedNotification(final long procId) { + if (!this.listeners.isEmpty()) { + for (ProcedureExecutorListener listener: this.listeners) { + try { + listener.procedureAdded(procId); + } catch (Throwable e) { + LOG.error("the listener " + listener + " had an error: " + e.getMessage(), e); + } + } + } + } + + private void sendProcedureFinishedNotification(final long procId) { + if (!this.listeners.isEmpty()) { + for (ProcedureExecutorListener listener: this.listeners) { + try { + listener.procedureFinished(procId); + } catch (Throwable e) { + LOG.error("the listener " + listener + " had an error: " + e.getMessage(), e); + } + } + } + } + + private long nextProcId() { + long procId = lastProcId.incrementAndGet(); + if (procId < 0) { + while (!lastProcId.compareAndSet(procId, 0)) { + procId = lastProcId.get(); + if (procId >= 0) + break; + } + while (procedures.containsKey(procId)) { + procId = lastProcId.incrementAndGet(); + } + } + return procId; + } + + private Long getRootProcedureId(Procedure proc) { + return Procedure.getRootProcedureId(procedures, proc); + } + + private void procedureFinished(final Procedure proc) { + // call the procedure completion cleanup handler + try { + proc.completionCleanup(getEnvironment()); + } catch (Throwable e) { + // Catch NullPointerExceptions or similar errors... + LOG.error("CODE-BUG: uncatched runtime exception for procedure: " + proc, e); + } + + // update the executor internal state maps + completed.put(proc.getProcId(), newResultFromProcedure(proc)); + rollbackStack.remove(proc.getProcId()); + procedures.remove(proc.getProcId()); + + // call the runnableSet completion cleanup handler + try { + runnables.completionCleanup(proc); + } catch (Throwable e) { + // Catch NullPointerExceptions or similar errors... + LOG.error("CODE-BUG: uncatched runtime exception for runnableSet: " + runnables, e); + } + + // Notify the listeners + sendProcedureFinishedNotification(proc.getProcId()); + } + + public Pair getResultOrProcedure(final long procId) { + ProcedureResult result = completed.get(procId); + Procedure proc = null; + if (result == null) { + proc = procedures.get(procId); + if (proc == null) { + result = completed.get(procId); + } + } + return new Pair(result, proc); + } + + private static ProcedureResult newResultFromProcedure(final Procedure proc) { + if (proc.isFailed()) { + return new ProcedureResult(proc.getStartTime(), proc.getLastUpdate(), proc.getException()); + } + return new ProcedureResult(proc.getStartTime(), proc.getLastUpdate(), proc.getResult()); + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFairRunQueues.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFairRunQueues.java new file mode 100644 index 00000000000..242ae868e7d --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFairRunQueues.java @@ -0,0 +1,174 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import java.util.Map; + +import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.ConcurrentSkipListMap; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * This class is a container of queues that allows to select a queue + * in a round robin fashion, considering priority of the queue. + * + * the quantum is just how many poll() will return the same object. + * e.g. if quantum is 1 and you have A and B as object you'll get: A B A B + * e.g. if quantum is 2 and you have A and B as object you'll get: A A B B A A B B + * then the object priority is just a priority * quantum + * + * Example: + * - three queues (A, B, C) with priorities (1, 1, 2) + * - The first poll() will return A + * - The second poll() will return B + * - The third and forth poll() will return C + * - and so on again and again. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class ProcedureFairRunQueues { + private ConcurrentSkipListMap objMap = + new ConcurrentSkipListMap(); + + private final ReentrantLock lock = new ReentrantLock(); + private final int quantum; + + private Map.Entry current = null; + private int currentQuantum = 0; + + public interface FairObject { + boolean isAvailable(); + int getPriority(); + } + + /** + * @param quantum how many poll() will return the same object. + */ + public ProcedureFairRunQueues(final int quantum) { + this.quantum = quantum; + } + + public TQueue get(final TKey key) { + return objMap.get(key); + } + + public TQueue add(final TKey key, final TQueue queue) { + TQueue oldq = objMap.putIfAbsent(key, queue); + return oldq != null ? 
oldq : queue; + } + + public TQueue remove(final TKey key) { + TQueue queue = objMap.get(key); + if (queue != null) { + lock.lock(); + try { + queue = objMap.remove(key); + if (current != null && queue == current.getValue()) { + currentQuantum = 0; + current = null; + } + } finally { + lock.unlock(); + } + } + return queue; + } + + public void clear() { + lock.lock(); + try { + currentQuantum = 0; + current = null; + objMap.clear(); + } finally { + lock.unlock(); + } + } + + /** + * @return the next available item if present + */ + public TQueue poll() { + lock.lock(); + try { + TQueue queue; + if (currentQuantum == 0) { + if (nextObject() == null) { + // nothing here + return null; + } + + queue = current.getValue(); + currentQuantum = calculateQuantum(queue) - 1; + } else { + currentQuantum--; + queue = current.getValue(); + } + + if (!queue.isAvailable()) { + Map.Entry last = current; + // Try the next one + do { + if (nextObject() == null) + return null; + } while (current.getValue() != last.getValue() && !current.getValue().isAvailable()); + + queue = current.getValue(); + currentQuantum = calculateQuantum(queue) - 1; + } + + return queue; + } finally { + lock.unlock(); + } + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append('{'); + for (Map.Entry entry: objMap.entrySet()) { + builder.append(entry.getKey()); + builder.append(':'); + builder.append(entry.getValue()); + } + builder.append('}'); + return builder.toString(); + } + + private Map.Entry nextObject() { + Map.Entry next = null; + + // If we have already a key, try the next one + if (current != null) { + next = objMap.higherEntry(current.getKey()); + } + + // if there is no higher key, go back to the first + current = (next != null) ? next : objMap.firstEntry(); + return current; + } + + private int calculateQuantum(final TQueue fairObject) { + // TODO + return Math.max(1, fairObject.getPriority() * quantum); + } +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureResult.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureResult.java new file mode 100644 index 00000000000..0aebd5a083e --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureResult.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Once a Procedure completes the ProcedureExecutor takes all the useful + * information of the procedure (e.g. exception/result) and creates a ProcedureResult. 
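Illustrative sketch, not part of the patch: the round-robin behaviour described in the ProcedureFairRunQueues comment above, reproduced with a dummy FairObject. The generic parameters <String, DummyQueue> are an assumption, since type parameters do not survive in this extract; with quantum 1 and priorities (1, 1, 2) the expected poll order is A B C C A B C C.

import org.apache.hadoop.hbase.procedure2.ProcedureFairRunQueues;

public final class FairQueuesSketch {
  /** A trivial queue object that only carries a name and a priority. */
  static final class DummyQueue implements ProcedureFairRunQueues.FairObject {
    private final String name;
    private final int priority;
    DummyQueue(String name, int priority) { this.name = name; this.priority = priority; }
    @Override public boolean isAvailable() { return true; }  // always has work in this sketch
    @Override public int getPriority() { return priority; }
    @Override public String toString() { return name; }
  }

  public static void main(String[] args) {
    // quantum = 1: each key is returned getPriority() times before moving to the next key.
    ProcedureFairRunQueues<String, DummyQueue> queues =
        new ProcedureFairRunQueues<String, DummyQueue>(1);
    queues.add("a", new DummyQueue("A", 1));
    queues.add("b", new DummyQueue("B", 1));
    queues.add("c", new DummyQueue("C", 2));

    StringBuilder order = new StringBuilder();
    for (int i = 0; i < 8; ++i) {
      order.append(queues.poll()).append(' ');
    }
    System.out.println(order);  // A B C C A B C C
  }
}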
+ * The user of the Procedure framework will get the procedure result with + * procedureExecutor.getResult(procId) + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class ProcedureResult { + private final RemoteProcedureException exception; + private final long lastUpdate; + private final long startTime; + private final byte[] result; + + private long clientAckTime = -1; + + public ProcedureResult(final long startTime, final long lastUpdate, + final RemoteProcedureException exception) { + this.lastUpdate = lastUpdate; + this.startTime = startTime; + this.exception = exception; + this.result = null; + } + + public ProcedureResult(final long startTime, final long lastUpdate, final byte[] result) { + this.lastUpdate = lastUpdate; + this.startTime = startTime; + this.exception = null; + this.result = result; + } + + public boolean isFailed() { + return exception != null; + } + + public RemoteProcedureException getException() { + return exception; + } + + public boolean hasResultData() { + return result != null; + } + + public byte[] getResult() { + return result; + } + + public long getStartTime() { + return startTime; + } + + public long getLastUpdate() { + return lastUpdate; + } + + public long executionTime() { + return lastUpdate - startTime; + } + + public boolean hasClientAckTime() { + return clientAckTime > 0; + } + + public long getClientAckTime() { + return clientAckTime; + } + + @InterfaceAudience.Private + protected void setClientAckTime(final long timestamp) { + this.clientAckTime = timestamp; + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureRunnableSet.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureRunnableSet.java new file mode 100644 index 00000000000..2d7ba39be36 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureRunnableSet.java @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Keep track of the runnable procedures + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface ProcedureRunnableSet { + /** + * Inserts the specified element at the front of this queue. + * @param proc the Procedure to add + */ + void addFront(Procedure proc); + + /** + * Inserts the specified element at the end of this queue. + * @param proc the Procedure to add + */ + void addBack(Procedure proc); + + /** + * The procedure can't run at the moment. + * add it back to the queue, giving priority to someone else. 
+ * @param proc the Procedure to add back to the list + */ + void yield(Procedure proc); + + /** + * The procedure in execution completed. + * This can be implemented to perform cleanups. + * @param proc the Procedure that completed the execution. + */ + void completionCleanup(Procedure proc); + + /** + * Fetch one Procedure from the queue + * @return the Procedure ID to execute, or null if nothing present. + */ + Long poll(); + + /** + * In case the class is blocking on poll() waiting for items to be added, + * this method should awake poll() and poll() should return. + */ + void signalAll(); + + /** + * Returns the number of elements in this collection. + * @return the number of elements in this collection. + */ + int size(); + + /** + * Removes all of the elements from this collection. + */ + void clear(); +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSimpleRunQueue.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSimpleRunQueue.java new file mode 100644 index 00000000000..7b17fb26f18 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSimpleRunQueue.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.procedure2; + +import java.util.ArrayDeque; +import java.util.Deque; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Simple runqueue for the procedures + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class ProcedureSimpleRunQueue implements ProcedureRunnableSet { + private final Deque runnables = new ArrayDeque(); + private final ReentrantLock lock = new ReentrantLock(); + private final Condition waitCond = lock.newCondition(); + + @Override + public void addFront(final Procedure proc) { + lock.lock(); + try { + runnables.addFirst(proc.getProcId()); + waitCond.signal(); + } finally { + lock.unlock(); + } + } + + @Override + public void addBack(final Procedure proc) { + lock.lock(); + try { + runnables.addLast(proc.getProcId()); + waitCond.signal(); + } finally { + lock.unlock(); + } + } + + @Override + public void yield(final Procedure proc) { + addBack(proc); + } + + @Override + @edu.umd.cs.findbugs.annotations.SuppressWarnings("WA_AWAIT_NOT_IN_LOOP") + public Long poll() { + lock.lock(); + try { + if (runnables.isEmpty()) { + waitCond.await(); + if (!runnables.isEmpty()) { + return runnables.pop(); + } + } else { + return runnables.pop(); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return null; + } finally { + lock.unlock(); + } + return null; + } + + @Override + public void signalAll() { + lock.lock(); + try { + waitCond.signalAll(); + } finally { + lock.unlock(); + } + } + + @Override + public void clear() { + lock.lock(); + try { + runnables.clear(); + } finally { + lock.unlock(); + } + } + + @Override + public int size() { + lock.lock(); + try { + return runnables.size(); + } finally { + lock.unlock(); + } + } + + @Override + public void completionCleanup(Procedure proc) { + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java new file mode 100644 index 00000000000..177ff5b0fc3 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.procedure2; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +// TODO: Not used yet +@InterfaceAudience.Public +@InterfaceStability.Stable +public class ProcedureYieldException extends ProcedureException { + /** default constructor */ + public ProcedureYieldException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public ProcedureYieldException(String s) { + super(s); + } +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java new file mode 100644 index 00000000000..6be512ddeac --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java @@ -0,0 +1,116 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; + +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage; +import org.apache.hadoop.hbase.util.ForeignExceptionUtil; + +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * A RemoteProcedureException is an exception from another thread or process. + *
      + * RemoteProcedureExceptions are sent to 'remote' peers to signal an abort in the face of failures. + * When serialized for transmission we encode using Protobufs to ensure version compatibility. + *
      + * RemoteProcedureException exceptions contain a Throwable as its cause. + * This can be a "regular" exception generated locally or a ProxyThrowable that is a representation + * of the original exception created on original 'remote' source. These ProxyThrowables have their + * their stacks traces and messages overridden to reflect the original 'remote' exception. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +@SuppressWarnings("serial") +public class RemoteProcedureException extends ProcedureException { + + /** + * Name of the throwable's source such as a host or thread name. Must be non-null. + */ + private final String source; + + /** + * Create a new RemoteProcedureException that can be serialized. + * It is assumed that this came form a local source. + * @param source + * @param cause + */ + public RemoteProcedureException(String source, Throwable cause) { + super(cause); + assert source != null; + assert cause != null; + this.source = source; + } + + public String getSource() { + return source; + } + + public IOException unwrapRemoteException() { + if (getCause() instanceof RemoteException) { + return ((RemoteException)getCause()).unwrapRemoteException(); + } + if (getCause() instanceof IOException) { + return (IOException)getCause(); + } + return new IOException(getCause()); + } + + @Override + public String toString() { + String className = getCause().getClass().getName(); + return className + " via " + getSource() + ":" + getLocalizedMessage(); + } + + /** + * Converts a RemoteProcedureException to an array of bytes. + * @param source the name of the external exception source + * @param t the "local" external exception (local) + * @return protobuf serialized version of RemoteProcedureException + */ + public static byte[] serialize(String source, Throwable t) { + return toProto(source, t).toByteArray(); + } + + /** + * Takes a series of bytes and tries to generate an RemoteProcedureException instance for it. + * @param bytes + * @return the ForeignExcpetion instance + * @throws InvalidProtocolBufferException if there was deserialization problem this is thrown. + */ + public static RemoteProcedureException deserialize(byte[] bytes) + throws InvalidProtocolBufferException { + return fromProto(ForeignExceptionMessage.parseFrom(bytes)); + } + + public ForeignExceptionMessage convert() { + return ForeignExceptionUtil.toProtoForeignException(getSource(), getCause()); + } + + public static ForeignExceptionMessage toProto(String source, Throwable t) { + return ForeignExceptionUtil.toProtoForeignException(source, t); + } + + public static RemoteProcedureException fromProto(final ForeignExceptionMessage eem) { + return new RemoteProcedureException(eem.getSource(), ForeignExceptionUtil.toIOException(eem)); + } +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java new file mode 100644 index 00000000000..bc1af207b0b --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java @@ -0,0 +1,185 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
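Illustrative only, not part of the patch: a round trip through the RemoteProcedureException protobuf serialization defined above. The source name and the IOException are arbitrary example values.

import java.io.IOException;
import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;

public final class RemoteExceptionRoundTripSketch {
  public static void main(String[] args) throws Exception {
    // Encode a local failure the way it would be shipped to a remote peer...
    byte[] bytes = RemoteProcedureException.serialize("host-1", new IOException("disk full"));

    // ...and rebuild it on the receiving side from the ForeignExceptionMessage.
    RemoteProcedureException remote = RemoteProcedureException.deserialize(bytes);
    System.out.println(remote.getSource());              // host-1
    System.out.println(remote.unwrapRemoteException());  // IOException rebuilt from the message
  }
}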
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState; + +/** + * Internal state of the ProcedureExecutor that describes the state of a "Root Procedure". + * A "Root Procedure" is a Procedure without parent, each subprocedure will be + * added to the "Root Procedure" stack (or rollback-stack). + * + * RootProcedureState is used and managed only by the ProcedureExecutor. + * Long rootProcId = getRootProcedureId(proc); + * rollbackStack.get(rootProcId).acquire(proc) + * rollbackStack.get(rootProcId).release(proc) + * ... + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +class RootProcedureState { + private static final Log LOG = LogFactory.getLog(RootProcedureState.class); + + private enum State { + RUNNING, // The Procedure is running or ready to run + FAILED, // The Procedure failed, waiting for the rollback executing + ROLLINGBACK, // The Procedure failed and the execution was rolledback + } + + private ArrayList subprocedures = null; + private State state = State.RUNNING; + private int running = 0; + + public synchronized boolean isFailed() { + switch (state) { + case ROLLINGBACK: + case FAILED: + return true; + default: + break; + } + return false; + } + + public synchronized boolean isRollingback() { + return state == State.ROLLINGBACK; + } + + /** + * Called by the ProcedureExecutor to mark rollback execution + */ + protected synchronized boolean setRollback() { + if (running == 0 && state == State.FAILED) { + state = State.ROLLINGBACK; + return true; + } + return false; + } + + /** + * Called by the ProcedureExecutor to mark rollback execution + */ + protected synchronized void unsetRollback() { + assert state == State.ROLLINGBACK; + state = State.FAILED; + } + + protected synchronized List getSubprocedures() { + return subprocedures; + } + + protected synchronized RemoteProcedureException getException() { + if (subprocedures != null) { + for (Procedure proc: subprocedures) { + if (proc.hasException()) { + return proc.getException(); + } + } + } + return null; + } + + /** + * Called by the ProcedureExecutor to mark the procedure step as running. + */ + protected synchronized boolean acquire(final Procedure proc) { + if (state != State.RUNNING) return false; + + running++; + return true; + } + + /** + * Called by the ProcedureExecutor to mark the procedure step as finished. 
+ */ + protected synchronized void release(final Procedure proc) { + running--; + } + + protected synchronized void abort() { + if (state == State.RUNNING) { + state = State.FAILED; + } + } + + /** + * Called by the ProcedureExecutor after the procedure step is completed, + * to add the step to the rollback list (or procedure stack) + */ + protected synchronized void addRollbackStep(final Procedure proc) { + if (proc.isFailed()) { + state = State.FAILED; + } + if (subprocedures == null) { + subprocedures = new ArrayList(); + } + proc.addStackIndex(subprocedures.size()); + subprocedures.add(proc); + } + + /** + * Called on store load by the ProcedureExecutor to load part of the stack. + * + * Each procedure has its own stack-positions. Which means we have to write + * to the store only the Procedure we executed, and nothing else. + * on load we recreate the full stack by aggregating each procedure stack-positions. + */ + protected synchronized void loadStack(final Procedure proc) { + int[] stackIndexes = proc.getStackIndexes(); + if (stackIndexes != null) { + if (subprocedures == null) { + subprocedures = new ArrayList(); + } + int diff = (1 + stackIndexes[stackIndexes.length - 1]) - subprocedures.size(); + if (diff > 0) { + subprocedures.ensureCapacity(1 + stackIndexes[stackIndexes.length - 1]); + while (diff-- > 0) subprocedures.add(null); + } + for (int i = 0; i < stackIndexes.length; ++i) { + subprocedures.set(stackIndexes[i], proc); + } + } + if (proc.getState() == ProcedureState.ROLLEDBACK) { + state = State.ROLLINGBACK; + } else if (proc.isFailed()) { + state = State.FAILED; + } + } + + /** + * Called on store load by the ProcedureExecutor to validate the procedure stack. + */ + protected synchronized boolean isValid() { + if (subprocedures != null) { + for (Procedure proc: subprocedures) { + if (proc == null) { + return false; + } + } + } + return true; + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java new file mode 100644 index 00000000000..b4b35f246e2 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData; + +/** + * A SequentialProcedure describes one step in a procedure chain. + * -> Step 1 -> Step 2 -> Step 3 + * + * The main difference from a base Procedure is that the execute() of a + * SequentialProcedure will be called only once, there will be no second + * execute() call once the child are finished. which means once the child + * of a SequentialProcedure are completed the SequentialProcedure is completed too. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class SequentialProcedure extends Procedure { + private boolean executed = false; + + @Override + protected Procedure[] doExecute(final TEnvironment env) + throws ProcedureYieldException { + updateTimestamp(); + try { + Procedure[] children = !executed ? execute(env) : null; + executed = !executed; + return children; + } finally { + updateTimestamp(); + } + } + + @Override + protected void doRollback(final TEnvironment env) throws IOException { + updateTimestamp(); + if (executed) { + try { + rollback(env); + executed = !executed; + } finally { + updateTimestamp(); + } + } + } + + @Override + protected void serializeStateData(final OutputStream stream) throws IOException { + SequentialProcedureData.Builder data = SequentialProcedureData.newBuilder(); + data.setExecuted(executed); + data.build().writeDelimitedTo(stream); + } + + @Override + protected void deserializeStateData(final InputStream stream) throws IOException { + SequentialProcedureData data = SequentialProcedureData.parseDelimitedFrom(stream); + executed = data.getExecuted(); + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java new file mode 100644 index 00000000000..eab96e4b7d8 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java @@ -0,0 +1,166 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Arrays; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData; + +/** + * Procedure described by a series of steps. + * + * The procedure implementor must have an enum of 'states', describing + * the various step of the procedure. + * Once the procedure is running, the procedure-framework will call executeFromState() + * using the 'state' provided by the user. The first call to executeFromState() + * will be performed with 'state = null'. The implementor can jump between + * states using setNextState(MyStateEnum.ordinal()). + * The rollback will call rollbackState() for each state that was executed, in reverse order. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class StateMachineProcedure + extends Procedure { + private int stateCount = 0; + private int[] states = null; + + protected enum Flow { + HAS_MORE_STATE, + NO_MORE_STATE, + } + + /** + * called to perform a single step of the specified 'state' of the procedure + * @param state state to execute + * @return Flow.NO_MORE_STATE if the procedure is completed, + * Flow.HAS_MORE_STATE if there is another step. + */ + protected abstract Flow executeFromState(TEnvironment env, TState state) + throws ProcedureYieldException; + + /** + * called to perform the rollback of the specified state + * @param state state to rollback + * @throws IOException temporary failure, the rollback will retry later + */ + protected abstract void rollbackState(TEnvironment env, TState state) + throws IOException; + + /** + * Convert an ordinal (or state id) to an Enum (or more descriptive) state object. + * @param stateId the ordinal() of the state enum (or state id) + * @return the state enum object + */ + protected abstract TState getState(int stateId); + + /** + * Convert the Enum (or more descriptive) state object to an ordinal (or state id). + * @param state the state enum object + * @return stateId the ordinal() of the state enum (or state id) + */ + protected abstract int getStateId(TState state); + + /** + * Return the initial state object that will be used for the first call to executeFromState(). + * @return the initial state enum object + */ + protected abstract TState getInitialState(); + + /** + * Set the next state for the procedure. + * @param state the state enum object + */ + protected void setNextState(final TState state) { + setNextState(getStateId(state)); + } + + @Override + protected Procedure[] execute(final TEnvironment env) + throws ProcedureYieldException { + updateTimestamp(); + try { + TState state = stateCount > 0 ? getState(states[stateCount-1]) : getInitialState(); + if (stateCount == 0) { + setNextState(getStateId(state)); + } + if (executeFromState(env, state) == Flow.NO_MORE_STATE) { + // completed + return null; + } + return (isWaiting() || isFailed()) ? null : new Procedure[] {this}; + } finally { + updateTimestamp(); + } + } + + @Override + protected void rollback(final TEnvironment env) throws IOException { + try { + updateTimestamp(); + rollbackState(env, stateCount > 0 ? getState(states[stateCount-1]) : getInitialState()); + stateCount--; + } finally { + updateTimestamp(); + } + } + + /** + * Set the next state for the procedure. 
+ * @param stateId the ordinal() of the state enum (or state id) + */ + private void setNextState(final int stateId) { + if (states == null || states.length == stateCount) { + int newCapacity = stateCount + 8; + if (states != null) { + states = Arrays.copyOf(states, newCapacity); + } else { + states = new int[newCapacity]; + } + } + states[stateCount++] = stateId; + } + + @Override + protected void serializeStateData(final OutputStream stream) throws IOException { + StateMachineProcedureData.Builder data = StateMachineProcedureData.newBuilder(); + for (int i = 0; i < stateCount; ++i) { + data.addState(states[i]); + } + data.build().writeDelimitedTo(stream); + } + + @Override + protected void deserializeStateData(final InputStream stream) throws IOException { + StateMachineProcedureData data = StateMachineProcedureData.parseDelimitedFrom(stream); + stateCount = data.getStateCount(); + if (stateCount > 0) { + states = new int[stateCount]; + for (int i = 0; i < stateCount; ++i) { + states[i] = data.getState(i); + } + } else { + states = null; + } + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java new file mode 100644 index 00000000000..cd6b0a7a254 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class TwoPhaseProcedure extends Procedure { + // TODO (e.g. used by ACLs/VisibilityTags updates) +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java new file mode 100644 index 00000000000..0d1c050bd4f --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
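Hypothetical example, not part of the patch: a concrete two-step subclass showing the StateMachineProcedure contract described above (executeFromState per step, setNextState to advance, rollbackState to undo in reverse order). The generic parameters, the Void environment and the abort() override are assumptions based on the Procedure and ProcedureExecutor code in this patch.

import java.io.IOException;
import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;

public class DemoStateMachineProcedure
    extends StateMachineProcedure<Void, DemoStateMachineProcedure.Step> {
  public enum Step { STEP_ONE, STEP_TWO }

  @Override
  protected Flow executeFromState(Void env, Step step) {
    switch (step) {
      case STEP_ONE:
        // ... first unit of work here ...
        setNextState(Step.STEP_TWO);    // recorded in the serialized state
        return Flow.HAS_MORE_STATE;
      case STEP_TWO:
        // ... second unit of work here ...
        return Flow.NO_MORE_STATE;      // procedure is complete
      default:
        throw new UnsupportedOperationException("unhandled step " + step);
    }
  }

  @Override
  protected void rollbackState(Void env, Step step) throws IOException {
    // undo the given step; called once for each executed step, last step first
  }

  @Override
  protected Step getState(int stateId) { return Step.values()[stateId]; }

  @Override
  protected int getStateId(Step step) { return step.ordinal(); }

  @Override
  protected Step getInitialState() { return Step.STEP_ONE; }

  // Assumption: the base Procedure class requires an abort() implementation,
  // since ProcedureExecutor.abort() calls proc.abort(env) above.
  @Override
  protected boolean abort(Void env) { return false; }
}

Because StateMachineProcedure serializes the executed state ids (serializeStateData above), a restart can replay the recorded states and resume the sketch at the last step it reached.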
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2.store; + +import java.io.IOException; +import java.util.Iterator; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.procedure2.Procedure; + +/** + * The ProcedureStore is used by the executor to persist the state of each procedure execution. + * This allows to resume the execution of pending/in-progress procedures in case + * of machine failure or service shutdown. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface ProcedureStore { + /** + * Store listener interface. + * The main process should register a listener and respond to the store events. + */ + public interface ProcedureStoreListener { + /** + * triggered when the store is not able to write out data. + * the main process should abort. + */ + void abortProcess(); + } + + /** + * Add the listener to the notification list. + * @param listener The AssignmentListener to register + */ + void registerListener(ProcedureStoreListener listener); + + /** + * Remove the listener from the notification list. + * @param listener The AssignmentListener to unregister + * @return true if the listner was in the list and it was removed, otherwise false. + */ + boolean unregisterListener(ProcedureStoreListener listener); + + /** + * Start/Open the procedure store + * @param numThreads + */ + void start(int numThreads) throws IOException; + + /** + * Stop/Close the procedure store + * @param abort true if the stop is an abort + */ + void stop(boolean abort); + + /** + * @return true if the store is running, otherwise false. + */ + boolean isRunning(); + + /** + * @return the number of threads/slots passed to start() + */ + int getNumThreads(); + + /** + * Acquire the lease for the procedure store. + */ + void recoverLease() throws IOException; + + /** + * Load the Procedures in the store. + * @return the set of procedures present in the store + */ + Iterator load() throws IOException; + + /** + * When a procedure is submitted to the executor insert(proc, null) will be called. + * 'proc' has a 'RUNNABLE' state and the initial information required to start up. + * + * When a procedure is executed and it returns children insert(proc, subprocs) will be called. + * 'proc' has a 'WAITING' state and an update state. + * 'subprocs' are the children in 'RUNNABLE' state with the initial information. + * + * @param proc the procedure to serialize and write to the store. + * @param subprocs the newly created child of the proc. + */ + void insert(Procedure proc, Procedure[] subprocs); + + /** + * The specified procedure was executed, + * and the new state should be written to the store. + * @param proc the procedure to serialize and write to the store. + */ + void update(Procedure proc); + + /** + * The specified procId was removed from the executor, + * due to completion, abort or failure. + * The store implementor should remove all the information about the specified procId. + * @param procId the ID of the procedure to remove. 
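Illustrative only, not part of the patch: registering the ProcedureStoreListener defined above so that a host process reacts when the store can no longer persist procedure state. The store instance and the abort reaction are placeholders.

import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;

public final class StoreAbortHookSketch {
  /** Install a listener that stops the process if the store fails to write. */
  public static void install(final ProcedureStore store) {
    store.registerListener(new ProcedureStore.ProcedureStoreListener() {
      @Override
      public void abortProcess() {
        // Per the interface contract the main process should abort rather
        // than keep executing procedures it can no longer persist.
        System.err.println("procedure store failed, aborting process");
        System.exit(1);
      }
    });
  }
}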
+ */ + void delete(long procId); +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java new file mode 100644 index 00000000000..a4711f1d16e --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java @@ -0,0 +1,548 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2.store; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Iterator; +import java.util.Map; +import java.util.TreeMap; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos; + +/** + * Keeps track of live procedures. + * + * It can be used by the ProcedureStore to identify which procedures are already + * deleted/completed to avoid the deserialization step on restart. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class ProcedureStoreTracker { + private final TreeMap map = new TreeMap(); + + private boolean keepDeletes = false; + private boolean partial = false; + + public enum DeleteState { YES, NO, MAYBE } + + public static class BitSetNode { + private final static long WORD_MASK = 0xffffffffffffffffL; + private final static int ADDRESS_BITS_PER_WORD = 6; + private final static int BITS_PER_WORD = 1 << ADDRESS_BITS_PER_WORD; + private final static int MAX_NODE_SIZE = 4 << ADDRESS_BITS_PER_WORD; + + private long[] updated; + private long[] deleted; + private long start; + + public void dump() { + System.out.printf("%06d:%06d min=%d max=%d%n", getStart(), getEnd(), + getMinProcId(), getMaxProcId()); + System.out.println("Update:"); + for (int i = 0; i < updated.length; ++i) { + for (int j = 0; j < BITS_PER_WORD; ++j) { + System.out.print((updated[i] & (1L << j)) != 0 ? "1" : "0"); + } + System.out.println(" " + i); + } + System.out.println(); + System.out.println("Delete:"); + for (int i = 0; i < deleted.length; ++i) { + for (int j = 0; j < BITS_PER_WORD; ++j) { + System.out.print((deleted[i] & (1L << j)) != 0 ? "1" : "0"); + } + System.out.println(" " + i); + } + System.out.println(); + } + + public BitSetNode(final long procId, final boolean partial) { + start = alignDown(procId); + + int count = 2; + updated = new long[count]; + deleted = new long[count]; + for (int i = 0; i < count; ++i) { + updated[i] = 0; + deleted[i] = partial ? 
0 : WORD_MASK; + } + + updateState(procId, false); + } + + protected BitSetNode(final long start, final long[] updated, final long[] deleted) { + this.start = start; + this.updated = updated; + this.deleted = deleted; + } + + public void update(final long procId) { + updateState(procId, false); + } + + public void delete(final long procId) { + updateState(procId, true); + } + + public Long getStart() { + return start; + } + + public Long getEnd() { + return start + (updated.length << ADDRESS_BITS_PER_WORD) - 1; + } + + public boolean contains(final long procId) { + return start <= procId && procId <= getEnd(); + } + + public DeleteState isDeleted(final long procId) { + int bitmapIndex = getBitmapIndex(procId); + int wordIndex = bitmapIndex >> ADDRESS_BITS_PER_WORD; + if (wordIndex >= deleted.length) { + return DeleteState.MAYBE; + } + return (deleted[wordIndex] & (1L << bitmapIndex)) != 0 ? DeleteState.YES : DeleteState.NO; + } + + private boolean isUpdated(final long procId) { + int bitmapIndex = getBitmapIndex(procId); + int wordIndex = bitmapIndex >> ADDRESS_BITS_PER_WORD; + if (wordIndex >= updated.length) { + return false; + } + return (updated[wordIndex] & (1L << bitmapIndex)) != 0; + } + + public boolean isUpdated() { + // TODO: cache the value + for (int i = 0; i < updated.length; ++i) { + long deleteMask = ~deleted[i]; + if ((updated[i] & deleteMask) != (WORD_MASK & deleteMask)) { + return false; + } + } + return true; + } + + public boolean isEmpty() { + // TODO: cache the value + for (int i = 0; i < deleted.length; ++i) { + if (deleted[i] != WORD_MASK) { + return false; + } + } + return true; + } + + public void resetUpdates() { + for (int i = 0; i < updated.length; ++i) { + updated[i] = 0; + } + } + + public void undeleteAll() { + for (int i = 0; i < updated.length; ++i) { + deleted[i] = 0; + } + } + + public ProcedureProtos.ProcedureStoreTracker.TrackerNode convert() { + ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder builder = + ProcedureProtos.ProcedureStoreTracker.TrackerNode.newBuilder(); + builder.setStartId(start); + for (int i = 0; i < updated.length; ++i) { + builder.addUpdated(updated[i]); + builder.addDeleted(deleted[i]); + } + return builder.build(); + } + + public static BitSetNode convert(ProcedureProtos.ProcedureStoreTracker.TrackerNode data) { + long start = data.getStartId(); + int size = data.getUpdatedCount(); + long[] updated = new long[size]; + long[] deleted = new long[size]; + for (int i = 0; i < size; ++i) { + updated[i] = data.getUpdated(i); + deleted[i] = data.getDeleted(i); + } + return new BitSetNode(start, updated, deleted); + } + + // ======================================================================== + // Grow/Merge Helpers + // ======================================================================== + public boolean canGrow(final long procId) { + return Math.abs(procId - start) < MAX_NODE_SIZE; + } + + public boolean canMerge(final BitSetNode rightNode) { + assert start < rightNode.getEnd(); + return (rightNode.getEnd() - start) < MAX_NODE_SIZE; + } + + public void grow(final long procId) { + int delta, offset; + + if (procId < start) { + // add to head + long newStart = alignDown(procId); + delta = (int)(start - newStart) >> ADDRESS_BITS_PER_WORD; + offset = delta; + } else { + // Add to tail + long newEnd = alignUp(procId + 1); + delta = (int)(newEnd - getEnd()) >> ADDRESS_BITS_PER_WORD; + offset = 0; + } + + long[] newBitmap; + int oldSize = updated.length; + + newBitmap = new long[oldSize + delta]; + System.arraycopy(updated, 
0, newBitmap, offset, oldSize); + updated = newBitmap; + + newBitmap = new long[deleted.length + delta]; + System.arraycopy(deleted, 0, newBitmap, offset, oldSize); + deleted = newBitmap; + + for (int i = 0; i < delta; ++i) { + updated[oldSize + i] = 0; + deleted[oldSize + i] = WORD_MASK; + } + } + + public void merge(final BitSetNode rightNode) { + int delta = (int)(rightNode.getEnd() - getEnd()) >> ADDRESS_BITS_PER_WORD; + + long[] newBitmap; + int oldSize = updated.length; + int newSize = (delta - rightNode.updated.length); + int offset = oldSize + newSize; + + newBitmap = new long[oldSize + delta]; + System.arraycopy(updated, 0, newBitmap, 0, oldSize); + System.arraycopy(rightNode.updated, 0, newBitmap, offset, rightNode.updated.length); + updated = newBitmap; + + newBitmap = new long[oldSize + delta]; + System.arraycopy(deleted, 0, newBitmap, 0, oldSize); + System.arraycopy(rightNode.deleted, 0, newBitmap, offset, rightNode.deleted.length); + deleted = newBitmap; + + for (int i = 0; i < newSize; ++i) { + updated[offset + i] = 0; + deleted[offset + i] = WORD_MASK; + } + } + + @Override + public String toString() { + return "BitSetNode(" + getStart() + "-" + getEnd() + ")"; + } + + // ======================================================================== + // Min/Max Helpers + // ======================================================================== + public long getMinProcId() { + long minProcId = start; + for (int i = 0; i < deleted.length; ++i) { + if (deleted[i] == 0) { + return(minProcId); + } + + if (deleted[i] != WORD_MASK) { + for (int j = 0; j < BITS_PER_WORD; ++j) { + if ((deleted[i] & (1L << j)) != 0) { + return minProcId + j; + } + } + } + + minProcId += BITS_PER_WORD; + } + return minProcId; + } + + public long getMaxProcId() { + long maxProcId = getEnd(); + for (int i = deleted.length - 1; i >= 0; --i) { + if (deleted[i] == 0) { + return maxProcId; + } + + if (deleted[i] != WORD_MASK) { + for (int j = BITS_PER_WORD - 1; j >= 0; --j) { + if ((deleted[i] & (1L << j)) == 0) { + return maxProcId - (BITS_PER_WORD - 1 - j); + } + } + } + maxProcId -= BITS_PER_WORD; + } + return maxProcId; + } + + // ======================================================================== + // Bitmap Helpers + // ======================================================================== + private int getBitmapIndex(final long procId) { + return (int)(procId - start); + } + + private void updateState(final long procId, final boolean isDeleted) { + int bitmapIndex = getBitmapIndex(procId); + int wordIndex = bitmapIndex >> ADDRESS_BITS_PER_WORD; + long value = (1L << bitmapIndex); + + if (isDeleted) { + updated[wordIndex] |= value; + deleted[wordIndex] |= value; + } else { + updated[wordIndex] |= value; + deleted[wordIndex] &= ~value; + } + } + + // ======================================================================== + // Helpers + // ======================================================================== + private static long alignUp(final long x) { + return (x + (BITS_PER_WORD - 1)) & -BITS_PER_WORD; + } + + private static long alignDown(final long x) { + return x & -BITS_PER_WORD; + } + } + + public void insert(final Procedure proc, final Procedure[] subprocs) { + insert(proc.getProcId()); + if (subprocs != null) { + for (int i = 0; i < subprocs.length; ++i) { + insert(subprocs[i].getProcId()); + } + } + } + + public void update(final Procedure proc) { + update(proc.getProcId()); + } + + public void insert(long procId) { + BitSetNode node = getOrCreateNode(procId); + node.update(procId); + 
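+ // Note: BitSetNode.update() sets the "updated" bit and clears the "deleted" bit,
+ // so a newly inserted procId is tracked as live and as modified in the current log.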
} + + public void update(long procId) { + Map.Entry entry = map.floorEntry(procId); + assert entry != null : "expected node to update procId=" + procId; + + BitSetNode node = entry.getValue(); + assert node.contains(procId); + node.update(procId); + } + + public void delete(long procId) { + Map.Entry entry = map.floorEntry(procId); + assert entry != null : "expected node to delete procId=" + procId; + + BitSetNode node = entry.getValue(); + assert node.contains(procId) : "expected procId in the node"; + node.delete(procId); + + if (!keepDeletes && node.isEmpty()) { + // TODO: RESET if (map.size() == 1) + map.remove(entry.getKey()); + } + } + + @InterfaceAudience.Private + public void setDeleted(final long procId, final boolean isDeleted) { + BitSetNode node = getOrCreateNode(procId); + assert node.contains(procId) : "expected procId in the node"; + node.updateState(procId, isDeleted); + } + + public void clear() { + this.map.clear(); + } + + public DeleteState isDeleted(long procId) { + Map.Entry entry = map.floorEntry(procId); + if (entry != null) { + BitSetNode node = entry.getValue(); + DeleteState state = node.isDeleted(procId); + return partial && !node.isUpdated(procId) ? DeleteState.MAYBE : state; + } + return partial ? DeleteState.MAYBE : DeleteState.YES; + } + + public long getMinProcId() { + // TODO: Cache? + Map.Entry entry = map.firstEntry(); + return entry == null ? 0 : entry.getValue().getMinProcId(); + } + + public void setKeepDeletes(boolean keepDeletes) { + this.keepDeletes = keepDeletes; + if (!keepDeletes) { + Iterator> it = map.entrySet().iterator(); + while (it.hasNext()) { + Map.Entry entry = it.next(); + if (entry.getValue().isEmpty()) { + it.remove(); + } + } + } + } + + public void setPartialFlag(boolean isPartial) { + this.partial = isPartial; + } + + public boolean isEmpty() { + for (Map.Entry entry : map.entrySet()) { + if (entry.getValue().isEmpty() == false) { + return false; + } + } + return true; + } + + public boolean isUpdated() { + for (Map.Entry entry : map.entrySet()) { + if (entry.getValue().isUpdated() == false) { + return false; + } + } + return true; + } + + public void resetUpdates() { + for (Map.Entry entry : map.entrySet()) { + entry.getValue().resetUpdates(); + } + } + + public void undeleteAll() { + for (Map.Entry entry : map.entrySet()) { + entry.getValue().undeleteAll(); + } + } + + private BitSetNode getOrCreateNode(final long procId) { + // can procId fit in the left node? 
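+ // Overall flow below: reuse the node that already covers procId; otherwise merge the
+ // two neighbouring nodes or grow the closer growable one; as a last resort create a
+ // new BitSetNode aligned down to a 64-bit word boundary.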
+ BitSetNode leftNode = null; + boolean leftCanGrow = false; + Map.Entry leftEntry = map.floorEntry(procId); + if (leftEntry != null) { + leftNode = leftEntry.getValue(); + if (leftNode.contains(procId)) { + return leftNode; + } + leftCanGrow = leftNode.canGrow(procId); + } + + BitSetNode rightNode = null; + boolean rightCanGrow = false; + Map.Entry rightEntry = map.ceilingEntry(procId); + if (rightEntry != null) { + rightNode = rightEntry.getValue(); + rightCanGrow = rightNode.canGrow(procId); + if (leftNode != null) { + if (leftNode.canMerge(rightNode)) { + // merge left and right node + return mergeNodes(leftNode, rightNode); + } + + if (leftCanGrow && rightCanGrow) { + if ((procId - leftNode.getEnd()) <= (rightNode.getStart() - procId)) { + // grow the left node + return growNode(leftNode, procId); + } + // grow the right node + return growNode(rightNode, procId); + } + } + } + + // grow the left node + if (leftCanGrow) { + return growNode(leftNode, procId); + } + + // grow the right node + if (rightCanGrow) { + return growNode(rightNode, procId); + } + + // add new node + BitSetNode node = new BitSetNode(procId, partial); + map.put(node.getStart(), node); + return node; + } + + private BitSetNode growNode(BitSetNode node, long procId) { + map.remove(node.getStart()); + node.grow(procId); + map.put(node.getStart(), node); + return node; + } + + private BitSetNode mergeNodes(BitSetNode leftNode, BitSetNode rightNode) { + assert leftNode.getStart() < rightNode.getStart(); + leftNode.merge(rightNode); + map.remove(rightNode.getStart()); + return leftNode; + } + + public void dump() { + System.out.println("map " + map.size()); + for (Map.Entry entry : map.entrySet()) { + entry.getValue().dump(); + } + } + + public void writeTo(final OutputStream stream) throws IOException { + ProcedureProtos.ProcedureStoreTracker.Builder builder = + ProcedureProtos.ProcedureStoreTracker.newBuilder(); + for (Map.Entry entry : map.entrySet()) { + builder.addNode(entry.getValue().convert()); + } + builder.build().writeDelimitedTo(stream); + } + + public void readFrom(final InputStream stream) throws IOException { + ProcedureProtos.ProcedureStoreTracker data = + ProcedureProtos.ProcedureStoreTracker.parseDelimitedFrom(stream); + map.clear(); + for (ProcedureProtos.ProcedureStoreTracker.TrackerNode protoNode: data.getNodeList()) { + BitSetNode node = BitSetNode.convert(protoNode); + map.put(node.getStart(), node); + } + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java new file mode 100644 index 00000000000..29db3bfea3a --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2.store.wal; + +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Thrown when a procedure WAL is corrupted + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class CorruptedWALProcedureStoreException extends HBaseIOException { + /** default constructor */ + public CorruptedWALProcedureStoreException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public CorruptedWALProcedureStoreException(String s) { + super(s); + } +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java new file mode 100644 index 00000000000..859b3cbc21a --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java @@ -0,0 +1,152 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.procedure2.store.wal; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer; + +/** + * Describes a WAL File + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class ProcedureWALFile implements Comparable { + private static final Log LOG = LogFactory.getLog(ProcedureWALFile.class); + + private ProcedureWALHeader header; + private FSDataInputStream stream; + private FileStatus logStatus; + private FileSystem fs; + private Path logFile; + private long startPos; + + public ProcedureWALFile(final FileSystem fs, final FileStatus logStatus) { + this.fs = fs; + this.logStatus = logStatus; + this.logFile = logStatus.getPath(); + } + + public ProcedureWALFile(FileSystem fs, Path logFile, ProcedureWALHeader header, long startPos) { + this.fs = fs; + this.logFile = logFile; + this.header = header; + this.startPos = startPos; + } + + public void open() throws IOException { + if (stream == null) { + stream = fs.open(logFile); + } + + if (header == null) { + header = ProcedureWALFormat.readHeader(stream); + startPos = stream.getPos(); + } else { + stream.seek(startPos); + } + } + + public ProcedureWALTrailer readTrailer() throws IOException { + try { + return ProcedureWALFormat.readTrailer(stream, startPos, logStatus.getLen()); + } finally { + stream.seek(startPos); + } + } + + public void readTracker(ProcedureStoreTracker tracker) throws IOException { + ProcedureWALTrailer trailer = readTrailer(); + try { + stream.seek(trailer.getTrackerPos()); + tracker.readFrom(stream); + } finally { + stream.seek(startPos); + } + } + + public void close() { + if (stream == null) return; + try { + stream.close(); + } catch (IOException e) { + LOG.warn("unable to close the wal file: " + logFile, e); + } finally { + stream = null; + } + } + + public FSDataInputStream getStream() { + return stream; + } + + public ProcedureWALHeader getHeader() { + return header; + } + + public boolean isCompacted() { + return header.getType() == ProcedureWALFormat.LOG_TYPE_COMPACTED; + } + + public long getLogId() { + return header.getLogId(); + } + + public long getSize() { + return logStatus.getLen(); + } + + public void removeFile() throws IOException { + close(); + fs.delete(logFile, false); + } + + @Override + public int compareTo(final ProcedureWALFile other) { + long diff = header.getLogId() - other.header.getLogId(); + return (diff < 0) ? -1 : (diff > 0) ? 
1 : 0; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof ProcedureWALFile)) return false; + return compareTo((ProcedureWALFile)o) == 0; + } + + @Override + public int hashCode() { + return logFile.hashCode(); + } + + @Override + public String toString() { + return logFile.toString(); + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java new file mode 100644 index 00000000000..17432ac24c9 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java @@ -0,0 +1,234 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2.store.wal; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Iterator; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.hbase.io.util.StreamUtils; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker; +import org.apache.hadoop.hbase.procedure2.util.ByteSlot; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer; + +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * Helper class that contains the WAL serialization utils. 
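+ *
+ * A log file produced by this class is laid out roughly as follows (a sketch inferred
+ * from the writeHeader/writeEntry/writeTrailer methods below, not a formal spec):
+ *   ProcedureWALHeader            (delimited protobuf)
+ *   ProcedureWALEntry ...         (INIT/INSERT/UPDATE/DELETE entries, delimited protobuf)
+ *   ProcedureWALEntry EOF         (marks the end of the entry section)
+ *   ProcedureStoreTracker         (delimited protobuf)
+ *   trailer: version byte, TRAILER_MAGIC, offset of the EOF entry (17 bytes in total)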
+ */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class ProcedureWALFormat { + static final byte LOG_TYPE_STREAM = 0; + static final byte LOG_TYPE_COMPACTED = 1; + static final byte LOG_TYPE_MAX_VALID = 1; + + static final byte HEADER_VERSION = 1; + static final byte TRAILER_VERSION = 1; + static final long HEADER_MAGIC = 0x31764c4157637250L; + static final long TRAILER_MAGIC = 0x50726357414c7631L; + + @InterfaceAudience.Private + public static class InvalidWALDataException extends IOException { + public InvalidWALDataException(String s) { + super(s); + } + + public InvalidWALDataException(Throwable t) { + super(t); + } + } + + interface Loader { + void removeLog(ProcedureWALFile log); + void markCorruptedWAL(ProcedureWALFile log, IOException e); + } + + private ProcedureWALFormat() {} + + public static Iterator load(final Iterator logs, + final ProcedureStoreTracker tracker, final Loader loader) throws IOException { + ProcedureWALFormatReader reader = new ProcedureWALFormatReader(tracker); + tracker.setKeepDeletes(true); + try { + while (logs.hasNext()) { + ProcedureWALFile log = logs.next(); + log.open(); + try { + reader.read(log, loader); + } finally { + log.close(); + } + } + // The tracker is now updated with all the procedures read from the logs + tracker.setPartialFlag(false); + tracker.resetUpdates(); + } finally { + tracker.setKeepDeletes(false); + } + // TODO: Write compacted version? + return reader.getProcedures(); + } + + public static void writeHeader(OutputStream stream, ProcedureWALHeader header) + throws IOException { + header.writeDelimitedTo(stream); + } + + /* + * +-----------------+ + * | END OF WAL DATA | <---+ + * +-----------------+ | + * | | | + * | Tracker | | + * | | | + * +-----------------+ | + * | version | | + * +-----------------+ | + * | TRAILER_MAGIC | | + * +-----------------+ | + * | offset |-----+ + * +-----------------+ + */ + public static void writeTrailer(FSDataOutputStream stream, ProcedureStoreTracker tracker) + throws IOException { + long offset = stream.getPos(); + + // Write EOF Entry + ProcedureWALEntry.newBuilder() + .setType(ProcedureWALEntry.Type.EOF) + .build().writeDelimitedTo(stream); + + // Write Tracker + tracker.writeTo(stream); + + stream.write(TRAILER_VERSION); + StreamUtils.writeLong(stream, TRAILER_MAGIC); + StreamUtils.writeLong(stream, offset); + } + + public static ProcedureWALHeader readHeader(InputStream stream) + throws IOException { + ProcedureWALHeader header; + try { + header = ProcedureWALHeader.parseDelimitedFrom(stream); + } catch (InvalidProtocolBufferException e) { + throw new InvalidWALDataException(e); + } + + if (header == null) { + throw new InvalidWALDataException("No data available to read the Header"); + } + + if (header.getVersion() < 0 || header.getVersion() != HEADER_VERSION) { + throw new InvalidWALDataException("Invalid Header version. got " + header.getVersion() + + " expected " + HEADER_VERSION); + } + + if (header.getType() < 0 || header.getType() > LOG_TYPE_MAX_VALID) { + throw new InvalidWALDataException("Invalid header type. 
got " + header.getType()); + } + + return header; + } + + public static ProcedureWALTrailer readTrailer(FSDataInputStream stream, long startPos, long size) + throws IOException { + long trailerPos = size - 17; // Beginning of the Trailer Jump + + if (trailerPos < startPos) { + throw new InvalidWALDataException("Missing trailer: size=" + size + " startPos=" + startPos); + } + + stream.seek(trailerPos); + int version = stream.read(); + if (version != TRAILER_VERSION) { + throw new InvalidWALDataException("Invalid Trailer version. got " + version + + " expected " + TRAILER_VERSION); + } + + long magic = StreamUtils.readLong(stream); + if (magic != TRAILER_MAGIC) { + throw new InvalidWALDataException("Invalid Trailer magic. got " + magic + + " expected " + TRAILER_MAGIC); + } + + long trailerOffset = StreamUtils.readLong(stream); + stream.seek(trailerOffset); + + ProcedureWALEntry entry = readEntry(stream); + if (entry.getType() != ProcedureWALEntry.Type.EOF) { + throw new InvalidWALDataException("Invalid Trailer begin"); + } + + ProcedureWALTrailer trailer = ProcedureWALTrailer.newBuilder() + .setVersion(version) + .setTrackerPos(stream.getPos()) + .build(); + return trailer; + } + + public static ProcedureWALEntry readEntry(InputStream stream) throws IOException { + return ProcedureWALEntry.parseDelimitedFrom(stream); + } + + public static void writeEntry(ByteSlot slot, ProcedureWALEntry.Type type, + Procedure proc, Procedure[] subprocs) throws IOException { + ProcedureWALEntry.Builder builder = ProcedureWALEntry.newBuilder(); + builder.setType(type); + builder.addProcedure(Procedure.convert(proc)); + if (subprocs != null) { + for (int i = 0; i < subprocs.length; ++i) { + builder.addProcedure(Procedure.convert(subprocs[i])); + } + } + builder.build().writeDelimitedTo(slot); + } + + public static void writeInsert(ByteSlot slot, Procedure proc) + throws IOException { + writeEntry(slot, ProcedureWALEntry.Type.INIT, proc, null); + } + + public static void writeInsert(ByteSlot slot, Procedure proc, Procedure[] subprocs) + throws IOException { + writeEntry(slot, ProcedureWALEntry.Type.INSERT, proc, subprocs); + } + + public static void writeUpdate(ByteSlot slot, Procedure proc) + throws IOException { + writeEntry(slot, ProcedureWALEntry.Type.UPDATE, proc, null); + } + + public static void writeDelete(ByteSlot slot, long procId) + throws IOException { + ProcedureWALEntry.Builder builder = ProcedureWALEntry.newBuilder(); + builder.setType(ProcedureWALEntry.Type.DELETE); + builder.setProcId(procId); + builder.build().writeDelimitedTo(slot); + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java new file mode 100644 index 00000000000..a60b8f5d88b --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java @@ -0,0 +1,166 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2.store.wal; + +import java.io.IOException; +import java.util.Iterator; +import java.util.Map; +import java.util.HashMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry; + +/** + * Helper class that loads the procedures stored in a WAL + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class ProcedureWALFormatReader { + private static final Log LOG = LogFactory.getLog(ProcedureWALFormatReader.class); + + private final ProcedureStoreTracker tracker; + //private final long compactionLogId; + + private final Map procedures = new HashMap(); + private final Map localProcedures = + new HashMap(); + + private long maxProcId = 0; + + public ProcedureWALFormatReader(final ProcedureStoreTracker tracker) { + this.tracker = tracker; + } + + public void read(ProcedureWALFile log, ProcedureWALFormat.Loader loader) throws IOException { + FSDataInputStream stream = log.getStream(); + try { + boolean hasMore = true; + while (hasMore) { + ProcedureWALEntry entry = ProcedureWALFormat.readEntry(stream); + if (entry == null) { + LOG.warn("nothing left to decode. exiting with missing EOF"); + hasMore = false; + break; + } + switch (entry.getType()) { + case INIT: + readInitEntry(entry); + break; + case INSERT: + readInsertEntry(entry); + break; + case UPDATE: + case COMPACT: + readUpdateEntry(entry); + break; + case DELETE: + readDeleteEntry(entry); + break; + case EOF: + hasMore = false; + break; + default: + throw new CorruptedWALProcedureStoreException("Invalid entry: " + entry); + } + } + } catch (IOException e) { + LOG.error("got an exception while reading the procedure WAL: " + log, e); + loader.markCorruptedWAL(log, e); + } + + if (localProcedures.isEmpty()) { + LOG.info("No active entry found in state log " + log + ". 
removing it"); + loader.removeLog(log); + } else { + Iterator> itd = + localProcedures.entrySet().iterator(); + while (itd.hasNext()) { + Map.Entry entry = itd.next(); + itd.remove(); + + // Deserialize the procedure + Procedure proc = Procedure.convert(entry.getValue()); + procedures.put(entry.getKey(), proc); + } + + // TODO: Some procedure may be already runnables (see readInitEntry()) + // (we can also check the "update map" in the log trackers) + } + } + + public Iterator getProcedures() { + return procedures.values().iterator(); + } + + private void loadEntries(final ProcedureWALEntry entry) { + for (ProcedureProtos.Procedure proc: entry.getProcedureList()) { + maxProcId = Math.max(maxProcId, proc.getProcId()); + if (isRequired(proc.getProcId())) { + if (LOG.isTraceEnabled()) { + LOG.trace("read " + entry.getType() + " entry " + proc.getProcId()); + } + localProcedures.put(proc.getProcId(), proc); + tracker.setDeleted(proc.getProcId(), false); + } + } + } + + private void readInitEntry(final ProcedureWALEntry entry) + throws IOException { + assert entry.getProcedureCount() == 1 : "Expected only one procedure"; + // TODO: Make it runnable, before reading other files + loadEntries(entry); + } + + private void readInsertEntry(final ProcedureWALEntry entry) throws IOException { + assert entry.getProcedureCount() >= 1 : "Expected one or more procedures"; + loadEntries(entry); + } + + private void readUpdateEntry(final ProcedureWALEntry entry) throws IOException { + assert entry.getProcedureCount() == 1 : "Expected only one procedure"; + loadEntries(entry); + } + + private void readDeleteEntry(final ProcedureWALEntry entry) throws IOException { + assert entry.getProcedureCount() == 0 : "Expected no procedures"; + assert entry.hasProcId() : "expected ProcID"; + if (LOG.isTraceEnabled()) { + LOG.trace("read delete entry " + entry.getProcId()); + } + maxProcId = Math.max(maxProcId, entry.getProcId()); + localProcedures.remove(entry.getProcId()); + tracker.setDeleted(entry.getProcId(), true); + } + + private boolean isDeleted(final long procId) { + return tracker.isDeleted(procId) == ProcedureStoreTracker.DeleteState.YES; + } + + private boolean isRequired(final long procId) { + return !isDeleted(procId) && !procedures.containsKey(procId); + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java new file mode 100644 index 00000000000..09d2f7a36fc --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java @@ -0,0 +1,721 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.procedure2.store.wal; + +import java.io.IOException; +import java.io.FileNotFoundException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.TimeUnit; +import java.util.Arrays; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker; +import org.apache.hadoop.hbase.procedure2.util.ByteSlot; +import org.apache.hadoop.hbase.procedure2.util.StringUtils; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader; + +/** + * WAL implementation of the ProcedureStore. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class WALProcedureStore implements ProcedureStore { + private static final Log LOG = LogFactory.getLog(WALProcedureStore.class); + + public interface LeaseRecovery { + void recoverFileLease(FileSystem fs, Path path) throws IOException; + } + + private static final int MAX_RETRIES_BEFORE_ABORT = 3; + + private static final String SYNC_WAIT_MSEC_CONF_KEY = "hbase.procedure.store.wal.sync.wait.msec"; + private static final int DEFAULT_SYNC_WAIT_MSEC = 100; + + private final CopyOnWriteArrayList listeners = + new CopyOnWriteArrayList(); + + private final LinkedList logs = new LinkedList(); + private final ProcedureStoreTracker storeTracker = new ProcedureStoreTracker(); + private final AtomicBoolean running = new AtomicBoolean(false); + private final ReentrantLock lock = new ReentrantLock(); + private final Condition waitCond = lock.newCondition(); + private final Condition slotCond = lock.newCondition(); + private final Condition syncCond = lock.newCondition(); + + private final LeaseRecovery leaseRecovery; + private final Configuration conf; + private final FileSystem fs; + private final Path logDir; + + private AtomicBoolean inSync = new AtomicBoolean(false); + private ArrayBlockingQueue slotsCache = null; + private Set corruptedLogs = null; + private FSDataOutputStream stream = null; + private long totalSynced = 0; + private long flushLogId = 0; + private int slotIndex = 0; + private Thread syncThread; + private ByteSlot[] slots; + private int syncWaitMsec; + + public WALProcedureStore(final Configuration conf, final FileSystem fs, final Path logDir, + final LeaseRecovery leaseRecovery) { + this.fs = fs; + this.conf = conf; + this.logDir = logDir; + this.leaseRecovery = leaseRecovery; + } + + @Override + public void start(int numSlots) throws IOException { + if (running.getAndSet(true)) { + return; + } + + // Init buffer slots + slots = new 
ByteSlot[numSlots]; + slotsCache = new ArrayBlockingQueue(numSlots, true); + while (slotsCache.remainingCapacity() > 0) { + slotsCache.offer(new ByteSlot()); + } + + // Tunings + syncWaitMsec = conf.getInt(SYNC_WAIT_MSEC_CONF_KEY, DEFAULT_SYNC_WAIT_MSEC); + + // Init sync thread + syncThread = new Thread("WALProcedureStoreSyncThread") { + @Override + public void run() { + while (running.get()) { + try { + syncLoop(); + } catch (IOException e) { + LOG.error("got an exception from the sync-loop", e); + sendAbortProcessSignal(); + } + } + } + }; + syncThread.start(); + } + + @Override + public void stop(boolean abort) { + if (!running.getAndSet(false)) { + return; + } + + LOG.info("Stopping the WAL Procedure Store"); + if (lock.tryLock()) { + try { + waitCond.signalAll(); + } finally { + lock.unlock(); + } + } + + if (!abort) { + try { + syncThread.join(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + // Close the writer + closeStream(); + + // Close the old logs + // they should be already closed, this is just in case the load fails + // and we call start() and then stop() + for (ProcedureWALFile log: logs) { + log.close(); + } + logs.clear(); + } + + @Override + public boolean isRunning() { + return running.get(); + } + + @Override + public int getNumThreads() { + return slots == null ? 0 : slots.length; + } + + public ProcedureStoreTracker getStoreTracker() { + return storeTracker; + } + + @Override + public void registerListener(ProcedureStoreListener listener) { + this.listeners.add(listener); + } + + @Override + public boolean unregisterListener(ProcedureStoreListener listener) { + return this.listeners.remove(listener); + } + + @Override + public void recoverLease() throws IOException { + LOG.info("Starting WAL Procedure Store lease recovery"); + FileStatus[] oldLogs = getLogFiles(); + while (running.get()) { + // Get Log-MaxID and recover lease on old logs + flushLogId = initOldLogs(oldLogs) + 1; + + // Create new state-log + if (!rollWriter(flushLogId)) { + // someone else has already created this log + LOG.debug("someone else has already created log " + flushLogId); + continue; + } + + // We have the lease on the log + oldLogs = getLogFiles(); + if (getMaxLogId(oldLogs) > flushLogId) { + // Someone else created new logs + LOG.debug("someone else created new logs. expected maxLogId < " + flushLogId); + logs.getLast().removeFile(); + continue; + } + + LOG.info("lease acquired flushLogId=" + flushLogId); + break; + } + } + + @Override + public Iterator load() throws IOException { + if (logs.isEmpty()) { + throw new RuntimeException("recoverLease() must be called before loading data"); + } + + // Nothing to do, If we have only the current log. 
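+ // (recoverLease() always rolls a fresh log, so a single entry in 'logs' means there
+ // is no previous state to replay.)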
+ if (logs.size() == 1) { + LOG.debug("No state logs to replay"); + return null; + } + + // Load the old logs + final ArrayList toRemove = new ArrayList(); + Iterator it = logs.descendingIterator(); + it.next(); // Skip the current log + try { + return ProcedureWALFormat.load(it, storeTracker, new ProcedureWALFormat.Loader() { + @Override + public void removeLog(ProcedureWALFile log) { + toRemove.add(log); + } + + @Override + public void markCorruptedWAL(ProcedureWALFile log, IOException e) { + if (corruptedLogs == null) { + corruptedLogs = new HashSet(); + } + corruptedLogs.add(log); + // TODO: sideline corrupted log + } + }); + } finally { + if (!toRemove.isEmpty()) { + for (ProcedureWALFile log: toRemove) { + removeLogFile(log); + } + } + } + } + + @Override + public void insert(final Procedure proc, final Procedure[] subprocs) { + if (LOG.isTraceEnabled()) { + LOG.trace("insert " + proc + " subproc=" + Arrays.toString(subprocs)); + } + + ByteSlot slot = acquireSlot(); + long logId = -1; + try { + // Serialize the insert + if (subprocs != null) { + ProcedureWALFormat.writeInsert(slot, proc, subprocs); + } else { + assert !proc.hasParent(); + ProcedureWALFormat.writeInsert(slot, proc); + } + + // Push the transaction data and wait until it is persisted + logId = pushData(slot); + } catch (IOException e) { + // We are not able to serialize the procedure. + // this is a code error, and we are not able to go on. + LOG.fatal("Unable to serialize one of the procedure: proc=" + proc + + " subprocs=" + Arrays.toString(subprocs), e); + throw new RuntimeException(e); + } finally { + releaseSlot(slot); + } + + // Update the store tracker + synchronized (storeTracker) { + if (logId == flushLogId) { + storeTracker.insert(proc, subprocs); + } + } + } + + @Override + public void update(final Procedure proc) { + if (LOG.isTraceEnabled()) { + LOG.trace("update " + proc); + } + + ByteSlot slot = acquireSlot(); + long logId = -1; + try { + // Serialize the update + ProcedureWALFormat.writeUpdate(slot, proc); + + // Push the transaction data and wait until it is persisted + logId = pushData(slot); + } catch (IOException e) { + // We are not able to serialize the procedure. + // this is a code error, and we are not able to go on. + LOG.fatal("Unable to serialize the procedure: " + proc, e); + throw new RuntimeException(e); + } finally { + releaseSlot(slot); + } + + // Update the store tracker + boolean removeOldLogs = false; + synchronized (storeTracker) { + if (logId == flushLogId) { + storeTracker.update(proc); + removeOldLogs = storeTracker.isUpdated(); + } + } + + if (removeOldLogs) { + removeAllLogs(logId - 1); + } + } + + @Override + public void delete(final long procId) { + if (LOG.isTraceEnabled()) { + LOG.trace("delete " + procId); + } + + ByteSlot slot = acquireSlot(); + long logId = -1; + try { + // Serialize the delete + ProcedureWALFormat.writeDelete(slot, procId); + + // Push the transaction data and wait until it is persisted + logId = pushData(slot); + } catch (IOException e) { + // We are not able to serialize the procedure. + // this is a code error, and we are not able to go on. 
+ LOG.fatal("Unable to serialize the procedure: " + procId, e); + throw new RuntimeException(e); + } finally { + releaseSlot(slot); + } + + boolean removeOldLogs = false; + synchronized (storeTracker) { + if (logId == flushLogId) { + storeTracker.delete(procId); + if (storeTracker.isEmpty()) { + removeOldLogs = rollWriterOrDie(logId + 1); + } + } + } + + if (removeOldLogs) { + removeAllLogs(logId); + } + } + + private ByteSlot acquireSlot() { + ByteSlot slot = slotsCache.poll(); + return slot != null ? slot : new ByteSlot(); + } + + private void releaseSlot(final ByteSlot slot) { + slot.reset(); + slotsCache.offer(slot); + } + + private long pushData(final ByteSlot slot) { + assert isRunning() && !logs.isEmpty() : "recoverLease() must be called before inserting data"; + long logId = -1; + + lock.lock(); + try { + // Wait for the sync to be completed + while (true) { + if (inSync.get()) { + syncCond.await(); + } else if (slotIndex == slots.length) { + slotCond.signal(); + syncCond.await(); + } else { + break; + } + } + + slots[slotIndex++] = slot; + logId = flushLogId; + + // Notify that there is new data + if (slotIndex == 1) { + waitCond.signal(); + } + + // Notify that the slots are full + if (slotIndex == slots.length) { + slotCond.signal(); + } + syncCond.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + sendAbortProcessSignal(); + } finally { + lock.unlock(); + } + return logId; + } + + private void syncLoop() throws IOException { + inSync.set(false); + while (running.get()) { + lock.lock(); + try { + // Wait until new data is available + if (slotIndex == 0) { + if (LOG.isTraceEnabled()) { + LOG.trace("Waiting for data. flushed=" + StringUtils.humanSize(totalSynced)); + } + waitCond.await(); + if (slotIndex == 0) { + // no data.. 
probably a stop() + continue; + } + } + + // Wait SYNC_WAIT_MSEC or the signal of "slots full" before flushing + slotCond.await(syncWaitMsec, TimeUnit.MILLISECONDS); + + inSync.set(true); + totalSynced += syncSlots(); + slotIndex = 0; + inSync.set(false); + syncCond.signalAll(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + sendAbortProcessSignal(); + } finally { + lock.unlock(); + } + } + } + + private long syncSlots() { + int retry = 0; + long totalSynced = 0; + do { + try { + totalSynced = syncSlots(stream, slots, 0, slotIndex); + break; + } catch (Throwable e) { + if (++retry == MAX_RETRIES_BEFORE_ABORT) { + LOG.error("sync slot failed, abort.", e); + sendAbortProcessSignal(); + } + } + } while (running.get()); + return totalSynced; + } + + protected long syncSlots(FSDataOutputStream stream, ByteSlot[] slots, int offset, int count) + throws IOException { + long totalSynced = 0; + for (int i = 0; i < count; ++i) { + ByteSlot data = slots[offset + i]; + data.writeTo(stream); + totalSynced += data.size(); + } + stream.hsync(); + if (LOG.isTraceEnabled()) { + LOG.trace("Sync slots=" + count + '/' + slots.length + + " flushed=" + StringUtils.humanSize(totalSynced)); + } + return totalSynced; + } + + private void sendAbortProcessSignal() { + if (!this.listeners.isEmpty()) { + for (ProcedureStoreListener listener : this.listeners) { + listener.abortProcess(); + } + } + } + + private boolean rollWriterOrDie(final long logId) { + try { + return rollWriter(logId); + } catch (IOException e) { + LOG.warn("Unable to roll the log", e); + sendAbortProcessSignal(); + return false; + } + } + + private boolean rollWriter(final long logId) throws IOException { + ProcedureWALHeader header = ProcedureWALHeader.newBuilder() + .setVersion(ProcedureWALFormat.HEADER_VERSION) + .setType(ProcedureWALFormat.LOG_TYPE_STREAM) + .setMinProcId(storeTracker.getMinProcId()) + .setLogId(logId) + .build(); + + FSDataOutputStream newStream = null; + Path newLogFile = null; + long startPos = -1; + try { + newLogFile = getLogFilePath(logId); + newStream = fs.create(newLogFile, false); + ProcedureWALFormat.writeHeader(newStream, header); + startPos = newStream.getPos(); + } catch (FileAlreadyExistsException e) { + LOG.error("Log file with id=" + logId + " already exists", e); + return false; + } + lock.lock(); + try { + closeStream(); + synchronized (storeTracker) { + storeTracker.resetUpdates(); + } + stream = newStream; + flushLogId = logId; + totalSynced = 0; + logs.add(new ProcedureWALFile(fs, newLogFile, header, startPos)); + } finally { + lock.unlock(); + } + LOG.info("Roll new state log: " + logId); + return true; + } + + private void closeStream() { + try { + if (stream != null) { + try { + ProcedureWALFormat.writeTrailer(stream, storeTracker); + } catch (IOException e) { + LOG.warn("Unable to write the trailer: " + e.getMessage()); + } + stream.close(); + } + } catch (IOException e) { + LOG.error("Unable to close the stream", e); + } finally { + stream = null; + } + } + + private void removeAllLogs(long lastLogId) { + LOG.info("Remove all state logs with ID less then " + lastLogId); + while (!logs.isEmpty()) { + ProcedureWALFile log = logs.getFirst(); + if (lastLogId < log.getLogId()) { + break; + } + + removeLogFile(log); + } + } + + private boolean removeLogFile(final ProcedureWALFile log) { + try { + LOG.debug("remove log: " + log); + log.removeFile(); + logs.remove(log); + } catch (IOException e) { + LOG.error("unable to remove log " + log, e); + return false; + } + return true; + 
} + + public Set getCorruptedLogs() { + return corruptedLogs; + } + + // ========================================================================== + // FileSystem Log Files helpers + // ========================================================================== + public Path getLogDir() { + return this.logDir; + } + + public FileSystem getFileSystem() { + return this.fs; + } + + protected Path getLogFilePath(final long logId) throws IOException { + return new Path(logDir, String.format("state-%020d.log", logId)); + } + + private static long getLogIdFromName(final String name) { + int end = name.lastIndexOf(".log"); + int start = name.lastIndexOf('-') + 1; + while (start < end) { + if (name.charAt(start) != '0') + break; + start++; + } + return Long.parseLong(name.substring(start, end)); + } + + private FileStatus[] getLogFiles() throws IOException { + try { + return fs.listStatus(logDir, new PathFilter() { + @Override + public boolean accept(Path path) { + String name = path.getName(); + return name.startsWith("state-") && name.endsWith(".log"); + } + }); + } catch (FileNotFoundException e) { + LOG.warn("log directory not found: " + e.getMessage()); + return null; + } + } + + private long getMaxLogId(final FileStatus[] logFiles) { + long maxLogId = 0; + if (logFiles != null && logFiles.length > 0) { + for (int i = 0; i < logFiles.length; ++i) { + maxLogId = Math.max(maxLogId, getLogIdFromName(logFiles[i].getPath().getName())); + } + } + return maxLogId; + } + + /** + * @return Max-LogID of the specified log file set + */ + private long initOldLogs(final FileStatus[] logFiles) throws IOException { + this.logs.clear(); + + long maxLogId = 0; + if (logFiles != null && logFiles.length > 0) { + for (int i = 0; i < logFiles.length; ++i) { + final Path logPath = logFiles[i].getPath(); + leaseRecovery.recoverFileLease(fs, logPath); + maxLogId = Math.max(maxLogId, getLogIdFromName(logPath.getName())); + + ProcedureWALFile log = initOldLog(logFiles[i]); + if (log != null) { + this.logs.add(log); + } + } + Collections.sort(this.logs); + initTrackerFromOldLogs(); + } + return maxLogId; + } + + private void initTrackerFromOldLogs() { + // TODO: Load the most recent tracker available + if (!logs.isEmpty()) { + ProcedureWALFile log = logs.getLast(); + try { + log.readTracker(storeTracker); + } catch (IOException e) { + LOG.warn("Unable to read tracker for " + log + " - " + e.getMessage()); + // try the next one... 
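+ // Clearing the tracker and marking it partial makes isDeleted() return MAYBE for
+ // unknown procIds, so the WAL reader will not skip any entry during replay.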
+ storeTracker.clear(); + storeTracker.setPartialFlag(true); + } + } + } + + private ProcedureWALFile initOldLog(final FileStatus logFile) throws IOException { + ProcedureWALFile log = new ProcedureWALFile(fs, logFile); + if (logFile.getLen() == 0) { + LOG.warn("Remove uninitialized log " + logFile); + log.removeFile(); + return null; + } + + LOG.debug("opening state-log: " + logFile); + try { + log.open(); + } catch (ProcedureWALFormat.InvalidWALDataException e) { + LOG.warn("Remove uninitialized log " + logFile, e); + log.removeFile(); + return null; + } catch (IOException e) { + String msg = "Unable to read state log: " + logFile; + LOG.error(msg, e); + throw new IOException(msg, e); + } + + if (log.isCompacted()) { + try { + log.readTrailer(); + } catch (IOException e) { + // unfinished compacted log throw it away + LOG.warn("Unfinished compacted log " + logFile, e); + log.removeFile(); + return null; + } + } + return log; + } +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java new file mode 100644 index 00000000000..890411661b9 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java @@ -0,0 +1,111 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2.util; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.Arrays; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Similar to the ByteArrayOutputStream, with the exception that we can prepend an header. + * e.g. you write some data and you want to prepend an header that contains the data len or cksum. 
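+ * (writeTo() emits the bytes written after markHead() first, followed by the bytes
+ * written before it, which is how the "header" ends up in front of the data.)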
+ * + * ByteSlot slot = new ByteSlot(); + * // write data + * slot.write(...); + * slot.write(...); + * // write header with the size of the written data + * slot.markHead(); + * slot.write(Bytes.toBytes(slot.size())); + * // flush to stream as [header, data] + * slot.writeTo(stream); + * + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class ByteSlot extends OutputStream { + private static final int DOUBLE_GROW_LIMIT = 1 << 20; + private static final int GROW_ALIGN = 128; + + private byte[] buf; + private int head; + private int size; + + public void reset() { + head = 0; + size = 0; + } + + public void markHead() { + head = size; + } + + public int getHead() { + return head; + } + + public int size() { + return size; + } + + public byte[] getBuffer() { + return buf; + } + + public void writeAt(int offset, int b) { + head = Math.min(head, offset); + buf[offset] = (byte)b; + } + + public void write(int b) { + ensureCapacity(size + 1); + buf[size++] = (byte)b; + } + + public void write(byte[] b, int off, int len) { + ensureCapacity(size + len); + System.arraycopy(b, off, buf, size, len); + size += len; + } + + public void writeTo(final OutputStream stream) throws IOException { + if (head != 0) { + stream.write(buf, head, size - head); + stream.write(buf, 0, head); + } else { + stream.write(buf, 0, size); + } + } + + private void ensureCapacity(int minCapacity) { + minCapacity = (minCapacity + (GROW_ALIGN - 1)) & -GROW_ALIGN; + if (buf == null) { + buf = new byte[minCapacity]; + } else if (minCapacity > buf.length) { + int newCapacity = buf.length << 1; + if (minCapacity > newCapacity || newCapacity > DOUBLE_GROW_LIMIT) { + newCapacity = minCapacity; + } + buf = Arrays.copyOf(buf, newCapacity); + } + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java new file mode 100644 index 00000000000..97134c23e43 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.procedure2.util; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class StringUtils { + private StringUtils() {} + + public static String humanTimeDiff(long timeDiff) { + StringBuilder buf = new StringBuilder(); + long hours = timeDiff / (60*60*1000); + long rem = (timeDiff % (60*60*1000)); + long minutes = rem / (60*1000); + rem = rem % (60*1000); + float seconds = rem / 1000.0f; + + if (hours != 0){ + buf.append(hours); + buf.append("hrs, "); + } + if (minutes != 0){ + buf.append(minutes); + buf.append("mins, "); + } + if (hours > 0 || minutes > 0) { + buf.append(seconds); + buf.append("sec"); + } else { + buf.append(String.format("%.4fsec", seconds)); + } + return buf.toString(); + } + + public static String humanSize(double size) { + if (size >= (1L << 40)) return String.format("%.1fT", size / (1L << 40)); + if (size >= (1L << 30)) return String.format("%.1fG", size / (1L << 30)); + if (size >= (1L << 20)) return String.format("%.1fM", size / (1L << 20)); + if (size >= (1L << 10)) return String.format("%.1fK", size / (1L << 10)); + return String.format("%.0f", size); + } + + public static boolean isEmpty(final String input) { + return input == null || input.length() == 0; + } + + public static String buildString(final String... parts) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < parts.length; ++i) { + sb.append(parts[i]); + } + return sb.toString(); + } + + public static StringBuilder appendStrings(final StringBuilder sb, final String... parts) { + for (int i = 0; i < parts.length; ++i) { + sb.append(parts[i]); + } + return sb; + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/TimeoutBlockingQueue.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/TimeoutBlockingQueue.java new file mode 100644 index 00000000000..f710ef404d8 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/TimeoutBlockingQueue.java @@ -0,0 +1,217 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.hbase.procedure2.util;
+
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class TimeoutBlockingQueue<E> {
+  public static interface TimeoutRetriever<T> {
+    long getTimeout(T object);
+    TimeUnit getTimeUnit(T object);
+  }
+
+  private final ReentrantLock lock = new ReentrantLock();
+  private final Condition waitCond = lock.newCondition();
+  private final TimeoutRetriever<? super E> timeoutRetriever;
+
+  private E[] objects;
+  private int head = 0;
+  private int tail = 0;
+
+  public TimeoutBlockingQueue(TimeoutRetriever<? super E> timeoutRetriever) {
+    this(32, timeoutRetriever);
+  }
+
+  @SuppressWarnings("unchecked")
+  public TimeoutBlockingQueue(int capacity, TimeoutRetriever<? super E> timeoutRetriever) {
+    this.objects = (E[])new Object[capacity];
+    this.timeoutRetriever = timeoutRetriever;
+  }
+
+  public void dump() {
+    for (int i = 0; i < objects.length; ++i) {
+      if (i == head) {
+        System.out.print("[" + objects[i] + "] ");
+      } else if (i == tail) {
+        System.out.print("]" + objects[i] + "[ ");
+      } else {
+        System.out.print(objects[i] + " ");
+      }
+    }
+    System.out.println();
+  }
+
+  public void clear() {
+    lock.lock();
+    try {
+      if (head != tail) {
+        for (int i = head; i < tail; ++i) {
+          objects[i] = null;
+        }
+        head = 0;
+        tail = 0;
+        waitCond.signal();
+      }
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  public void add(E e) {
+    if (e == null) throw new NullPointerException();
+
+    lock.lock();
+    try {
+      addElement(e);
+      waitCond.signal();
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings("WA_AWAIT_NOT_IN_LOOP")
+  public E poll() {
+    lock.lock();
+    try {
+      if (isEmpty()) {
+        waitCond.await();
+        return null;
+      }
+
+      E elem = objects[head];
+      long nanos = getNanosTimeout(elem);
+      nanos = waitCond.awaitNanos(nanos);
+      return nanos > 0 ? null : removeFirst();
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      return null;
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  public int size() {
+    return tail - head;
+  }
+
+  public boolean isEmpty() {
+    return (tail - head) == 0;
+  }
+
+  public void signalAll() {
+    lock.lock();
+    try {
+      waitCond.signalAll();
+    } finally {
+      lock.unlock();
+    }
+  }
+
+  private void addElement(E elem) {
+    int size = (tail - head);
+    if ((objects.length - size) == 0) {
+      int capacity = size + ((size < 64) ?
(size + 2) : (size >> 1)); + E[] newObjects = (E[])new Object[capacity]; + + if (compareTimeouts(objects[tail - 1], elem) <= 0) { + // Append + System.arraycopy(objects, head, newObjects, 0, tail); + tail -= head; + newObjects[tail++] = elem; + } else if (compareTimeouts(objects[head], elem) > 0) { + // Prepend + System.arraycopy(objects, head, newObjects, 1, tail); + newObjects[0] = elem; + tail -= (head - 1); + } else { + // Insert in the middle + int index = upperBound(head, tail - 1, elem); + int newIndex = (index - head); + System.arraycopy(objects, head, newObjects, 0, newIndex); + newObjects[newIndex] = elem; + System.arraycopy(objects, index, newObjects, newIndex + 1, tail - index); + tail -= (head - 1); + } + head = 0; + objects = newObjects; + } else { + if (tail == objects.length) { + // shift down |-----AAAAAAA| + tail -= head; + System.arraycopy(objects, head, objects, 0, tail); + head = 0; + } + + if (tail == head || compareTimeouts(objects[tail - 1], elem) <= 0) { + // Append + objects[tail++] = elem; + } else if (head > 0 && compareTimeouts(objects[head], elem) > 0) { + // Prepend + objects[--head] = elem; + } else { + // Insert in the middle + int index = upperBound(head, tail - 1, elem); + System.arraycopy(objects, index, objects, index + 1, tail - index); + objects[index] = elem; + tail++; + } + } + } + + private E removeFirst() { + E elem = objects[head]; + objects[head] = null; + head = (head + 1) % objects.length; + if (head == 0) tail = 0; + return elem; + } + + private int upperBound(int start, int end, E key) { + while (start < end) { + int mid = (start + end) >>> 1; + E mitem = objects[mid]; + int cmp = compareTimeouts(mitem, key); + if (cmp > 0) { + end = mid; + } else { + start = mid + 1; + } + } + return start; + } + + private int compareTimeouts(final E a, final E b) { + long t1 = getNanosTimeout(a); + long t2 = getNanosTimeout(b); + return (t1 < t2) ? -1 : (t1 > t2) ? 1 : 0; + } + + private long getNanosTimeout(final E obj) { + TimeUnit unit = timeoutRetriever.getTimeUnit(obj); + long timeout = timeoutRetriever.getTimeout(obj); + return unit.toNanos(timeout); + } +} diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java new file mode 100644 index 00000000000..7b9fc69ed1a --- /dev/null +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java @@ -0,0 +1,163 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
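A minimal sketch of how the queue above is meant to be driven. The DelayedWork element type and its retriever are illustrative inventions; the calls are only the ones the class defines, and poll() blocks on the head element's timeout, returning null when it is signalled (or a new element arrives) before the timeout elapses:

    // Illustrative element type; not part of the patch.
    final class DelayedWork {
      final long timeoutMs;
      DelayedWork(long timeoutMs) { this.timeoutMs = timeoutMs; }
    }

    TimeoutBlockingQueue<DelayedWork> queue = new TimeoutBlockingQueue<DelayedWork>(
        new TimeoutBlockingQueue.TimeoutRetriever<DelayedWork>() {
          @Override public long getTimeout(DelayedWork w) { return w.timeoutMs; }
          @Override public TimeUnit getTimeUnit(DelayedWork w) { return TimeUnit.MILLISECONDS; }
        });

    queue.add(new DelayedWork(500));
    queue.add(new DelayedWork(100));   // kept sorted by timeout, so this becomes the head
    DelayedWork next = queue.poll();   // waits roughly 100ms; null if signalled early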
+ */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; +import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public class ProcedureTestingUtility { + private static final Log LOG = LogFactory.getLog(ProcedureTestingUtility.class); + + private ProcedureTestingUtility() { + } + + public static ProcedureStore createStore(final Configuration conf, final FileSystem fs, + final Path baseDir) throws IOException { + return createWalStore(conf, fs, baseDir); + } + + public static WALProcedureStore createWalStore(final Configuration conf, final FileSystem fs, + final Path logDir) throws IOException { + return new WALProcedureStore(conf, fs, logDir, new WALProcedureStore.LeaseRecovery() { + @Override + public void recoverFileLease(FileSystem fs, Path path) throws IOException { + // no-op + } + }); + } + + public static void restart(ProcedureExecutor procExecutor) + throws Exception { + restart(procExecutor, null); + } + + public static void restart(ProcedureExecutor procExecutor, + Runnable beforeStartAction) throws Exception { + ProcedureStore procStore = procExecutor.getStore(); + int storeThreads = procExecutor.getNumThreads(); + int execThreads = procExecutor.getNumThreads(); + // stop + procExecutor.stop(); + procExecutor.join(); + procStore.stop(false); + // nothing running... 
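+    // note: both thread counts are captured from the executor before the stop,
+    // so restart() behaves like a crash/restart of the procedure framework with
+    // an unchanged configuration.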
+ if (beforeStartAction != null) { + beforeStartAction.run(); + } + // re-start + procStore.start(storeThreads); + procExecutor.start(execThreads); + } + + public static void setKillBeforeStoreUpdate(ProcedureExecutor procExecutor, + boolean value) { + if (procExecutor.testing == null) { + procExecutor.testing = new ProcedureExecutor.Testing(); + } + procExecutor.testing.killBeforeStoreUpdate = value; + LOG.warn("Set Kill before store update to: " + procExecutor.testing.killBeforeStoreUpdate); + } + + public static void setToggleKillBeforeStoreUpdate(ProcedureExecutor procExecutor, + boolean value) { + if (procExecutor.testing == null) { + procExecutor.testing = new ProcedureExecutor.Testing(); + } + procExecutor.testing.toggleKillBeforeStoreUpdate = value; + } + + public static void toggleKillBeforeStoreUpdate(ProcedureExecutor procExecutor) { + if (procExecutor.testing == null) { + procExecutor.testing = new ProcedureExecutor.Testing(); + } + procExecutor.testing.killBeforeStoreUpdate = !procExecutor.testing.killBeforeStoreUpdate; + LOG.warn("Set Kill before store update to: " + procExecutor.testing.killBeforeStoreUpdate); + } + + public static void setKillAndToggleBeforeStoreUpdate(ProcedureExecutor procExecutor, + boolean value) { + ProcedureTestingUtility.setKillBeforeStoreUpdate(procExecutor, value); + ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExecutor, value); + } + + public static long submitAndWait(ProcedureExecutor procExecutor, Procedure proc) { + long procId = procExecutor.submitProcedure(proc); + waitProcedure(procExecutor, procId); + return procId; + } + + public static void waitProcedure(ProcedureExecutor procExecutor, long procId) { + while (!procExecutor.isFinished(procId) && procExecutor.isRunning()) { + Threads.sleepWithoutInterrupt(250); + } + } + + public static void waitNoProcedureRunning(ProcedureExecutor procExecutor) { + int stableRuns = 0; + while (stableRuns < 10) { + if (procExecutor.getActiveExecutorCount() > 0 || procExecutor.getRunnableSet().size() > 0) { + stableRuns = 0; + Threads.sleepWithoutInterrupt(100); + } else { + stableRuns++; + Threads.sleepWithoutInterrupt(25); + } + } + } + + public static void assertProcNotYetCompleted(ProcedureExecutor procExecutor, + long procId) { + assertFalse("expected a running proc", procExecutor.isFinished(procId)); + assertEquals(null, procExecutor.getResult(procId)); + } + + public static void assertProcNotFailed(ProcedureExecutor procExecutor, + long procId) { + ProcedureResult result = procExecutor.getResult(procId); + assertTrue("expected procedure result", result != null); + assertProcNotFailed(result); + } + + public static void assertProcNotFailed(final ProcedureResult result) { + Exception exception = result.getException(); + String msg = exception != null ? 
exception.toString() : "no exception found"; + assertFalse(msg, result.isFailed()); + } + + public static void assertIsAbortException(final ProcedureResult result) { + LOG.info(result.getException()); + assertEquals(true, result.isFailed()); + Throwable cause = result.getException().getCause(); + assertTrue("expected abort exception, got "+ cause, + cause instanceof ProcedureAbortedException); + } +} diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java new file mode 100644 index 00000000000..7fe109e7334 --- /dev/null +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java @@ -0,0 +1,338 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeoutException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseCommonTestingUtility; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + +import org.junit.After; +import org.junit.Before; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, SmallTests.class}) +public class TestProcedureExecution { + private static final Log LOG = LogFactory.getLog(TestProcedureExecution.class); + + private static final int PROCEDURE_EXECUTOR_SLOTS = 1; + private static final Procedure NULL_PROC = null; + + private ProcedureExecutor procExecutor; + private ProcedureStore procStore; + + private HBaseCommonTestingUtility htu; + private FileSystem fs; + private Path testDir; + private Path logDir; + + @Before + public void setUp() throws IOException { + htu = new HBaseCommonTestingUtility(); + testDir = htu.getDataTestDir(); + fs = testDir.getFileSystem(htu.getConfiguration()); + assertTrue(testDir.depth() > 1); + + logDir = new Path(testDir, "proc-logs"); + procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), fs, logDir); + procExecutor = new 
ProcedureExecutor(htu.getConfiguration(), null, procStore); + procStore.start(PROCEDURE_EXECUTOR_SLOTS); + procExecutor.start(PROCEDURE_EXECUTOR_SLOTS); + } + + @After + public void tearDown() throws IOException { + procExecutor.stop(); + procStore.stop(false); + fs.delete(logDir, true); + } + + private static class TestProcedureException extends Exception { + public TestProcedureException(String msg) { super(msg); } + } + + public static class TestSequentialProcedure extends SequentialProcedure { + private final Procedure[] subProcs; + private final List state; + private final Exception failure; + private final String name; + + public TestSequentialProcedure() { + throw new UnsupportedOperationException("recovery should not be triggered here"); + } + + public TestSequentialProcedure(String name, List state, Procedure... subProcs) { + this.state = state; + this.subProcs = subProcs; + this.name = name; + this.failure = null; + } + + public TestSequentialProcedure(String name, List state, Exception failure) { + this.state = state; + this.subProcs = null; + this.name = name; + this.failure = failure; + } + + @Override + protected Procedure[] execute(Void env) { + state.add(name + "-execute"); + if (failure != null) { + setFailure(new RemoteProcedureException(name + "-failure", failure)); + return null; + } + return subProcs; + } + + @Override + protected void rollback(Void env) { + state.add(name + "-rollback"); + } + + @Override + protected boolean abort(Void env) { + state.add(name + "-abort"); + return true; + } + } + + @Test(timeout=30000) + public void testBadSubprocList() { + List state = new ArrayList(); + Procedure subProc2 = new TestSequentialProcedure("subProc2", state); + Procedure subProc1 = new TestSequentialProcedure("subProc1", state, subProc2, NULL_PROC); + Procedure rootProc = new TestSequentialProcedure("rootProc", state, subProc1); + long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, rootProc); + + // subProc1 has a "null" subprocedure which is catched as InvalidArgument + // failed state with 2 execute and 2 rollback + LOG.info(state); + ProcedureResult result = procExecutor.getResult(rootId); + LOG.info(result.getException()); + assertTrue(state.toString(), result.isFailed()); + assertTrue(result.getException().toString(), + result.getException().getCause() instanceof IllegalArgumentException); + + assertEquals(state.toString(), 4, state.size()); + assertEquals("rootProc-execute", state.get(0)); + assertEquals("subProc1-execute", state.get(1)); + assertEquals("subProc1-rollback", state.get(2)); + assertEquals("rootProc-rollback", state.get(3)); + } + + @Test(timeout=30000) + public void testSingleSequentialProc() { + List state = new ArrayList(); + Procedure subProc2 = new TestSequentialProcedure("subProc2", state); + Procedure subProc1 = new TestSequentialProcedure("subProc1", state, subProc2); + Procedure rootProc = new TestSequentialProcedure("rootProc", state, subProc1); + long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, rootProc); + + // successful state, with 3 execute + LOG.info(state); + ProcedureResult result = procExecutor.getResult(rootId); + ProcedureTestingUtility.assertProcNotFailed(result); + assertEquals(state.toString(), 3, state.size()); + } + + @Test(timeout=30000) + public void testSingleSequentialProcRollback() { + List state = new ArrayList(); + Procedure subProc2 = new TestSequentialProcedure("subProc2", state, + new TestProcedureException("fail test")); + Procedure subProc1 = new TestSequentialProcedure("subProc1", 
state, subProc2); + Procedure rootProc = new TestSequentialProcedure("rootProc", state, subProc1); + long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, rootProc); + + // the 3rd proc fail, rollback after 2 successful execution + LOG.info(state); + ProcedureResult result = procExecutor.getResult(rootId); + LOG.info(result.getException()); + assertTrue(state.toString(), result.isFailed()); + assertTrue(result.getException().toString(), + result.getException().getCause() instanceof TestProcedureException); + + assertEquals(state.toString(), 6, state.size()); + assertEquals("rootProc-execute", state.get(0)); + assertEquals("subProc1-execute", state.get(1)); + assertEquals("subProc2-execute", state.get(2)); + assertEquals("subProc2-rollback", state.get(3)); + assertEquals("subProc1-rollback", state.get(4)); + assertEquals("rootProc-rollback", state.get(5)); + } + + public static class TestFaultyRollback extends SequentialProcedure { + private int retries = 0; + + public TestFaultyRollback() { } + + @Override + protected Procedure[] execute(Void env) { + setFailure("faulty-rollback-test", new TestProcedureException("test faulty rollback")); + return null; + } + + @Override + protected void rollback(Void env) throws IOException { + if (++retries < 3) { + LOG.info("inject rollback failure " + retries); + throw new IOException("injected failure number " + retries); + } + LOG.info("execute non faulty rollback step retries=" + retries); + } + + @Override + protected boolean abort(Void env) { return false; } + } + + @Test(timeout=30000) + public void testRollbackRetriableFailure() { + long procId = ProcedureTestingUtility.submitAndWait(procExecutor, new TestFaultyRollback()); + + ProcedureResult result = procExecutor.getResult(procId); + LOG.info(result.getException()); + assertTrue("expected a failure", result.isFailed()); + assertTrue(result.getException().toString(), + result.getException().getCause() instanceof TestProcedureException); + } + + public static class TestWaitingProcedure extends SequentialProcedure { + private final List state; + private final boolean hasChild; + private final String name; + + public TestWaitingProcedure() { + throw new UnsupportedOperationException("recovery should not be triggered here"); + } + + public TestWaitingProcedure(String name, List state, boolean hasChild) { + this.hasChild = hasChild; + this.state = state; + this.name = name; + } + + @Override + protected Procedure[] execute(Void env) { + state.add(name + "-execute"); + setState(ProcedureState.WAITING_TIMEOUT); + return hasChild ? 
new Procedure[] { new TestWaitChild(name, state) } : null; + } + + @Override + protected void rollback(Void env) { + state.add(name + "-rollback"); + } + + @Override + protected boolean abort(Void env) { + state.add(name + "-abort"); + return true; + } + + public static class TestWaitChild extends SequentialProcedure { + private final List state; + private final String name; + + public TestWaitChild() { + throw new UnsupportedOperationException("recovery should not be triggered here"); + } + + public TestWaitChild(String name, List state) { + this.name = name; + this.state = state; + } + + @Override + protected Procedure[] execute(Void env) { + state.add(name + "-child-execute"); + return null; + } + + @Override + protected void rollback(Void env) { + state.add(name + "-child-rollback"); + } + + @Override + protected boolean abort(Void env) { + state.add(name + "-child-abort"); + return true; + } + } + } + + @Test(timeout=30000) + public void testAbortTimeout() { + final int PROC_TIMEOUT_MSEC = 2500; + List state = new ArrayList(); + Procedure proc = new TestWaitingProcedure("wproc", state, false); + proc.setTimeout(PROC_TIMEOUT_MSEC); + long startTime = EnvironmentEdgeManager.currentTime(); + long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, proc); + long execTime = EnvironmentEdgeManager.currentTime() - startTime; + LOG.info(state); + assertTrue("we didn't wait enough execTime=" + execTime, execTime >= PROC_TIMEOUT_MSEC); + ProcedureResult result = procExecutor.getResult(rootId); + LOG.info(result.getException()); + assertTrue(state.toString(), result.isFailed()); + assertTrue(result.getException().toString(), + result.getException().getCause() instanceof TimeoutException); + assertEquals(state.toString(), 2, state.size()); + assertEquals("wproc-execute", state.get(0)); + assertEquals("wproc-rollback", state.get(1)); + } + + @Test(timeout=30000) + public void testAbortTimeoutWithChildren() { + List state = new ArrayList(); + Procedure proc = new TestWaitingProcedure("wproc", state, true); + proc.setTimeout(2500); + long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, proc); + LOG.info(state); + ProcedureResult result = procExecutor.getResult(rootId); + LOG.info(result.getException()); + assertTrue(state.toString(), result.isFailed()); + assertTrue(result.getException().toString(), + result.getException().getCause() instanceof TimeoutException); + assertEquals(state.toString(), 4, state.size()); + assertEquals("wproc-execute", state.get(0)); + assertEquals("wproc-child-execute", state.get(1)); + assertEquals("wproc-child-rollback", state.get(2)); + assertEquals("wproc-rollback", state.get(3)); + } +} \ No newline at end of file diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureFairRunQueues.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureFairRunQueues.java new file mode 100644 index 00000000000..e36a295baf5 --- /dev/null +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureFairRunQueues.java @@ -0,0 +1,155 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; + +@Category({MasterTests.class, SmallTests.class}) +public class TestProcedureFairRunQueues { + private static class TestRunQueue implements ProcedureFairRunQueues.FairObject { + private final int priority; + private final String name; + + private boolean available = true; + + public TestRunQueue(String name, int priority) { + this.name = name; + this.priority = priority; + } + + @Override + public String toString() { + return name; + } + + private void setAvailable(boolean available) { + this.available = available; + } + + @Override + public boolean isAvailable() { + return available; + } + + @Override + public int getPriority() { + return priority; + } + } + + @Test + public void testEmptyFairQueues() throws Exception { + ProcedureFairRunQueues fairq + = new ProcedureFairRunQueues(1); + for (int i = 0; i < 3; ++i) { + assertEquals(null, fairq.poll()); + } + } + + @Test + public void testFairQueues() throws Exception { + ProcedureFairRunQueues fairq + = new ProcedureFairRunQueues(1); + TestRunQueue a = fairq.add("A", new TestRunQueue("A", 1)); + TestRunQueue b = fairq.add("B", new TestRunQueue("B", 1)); + TestRunQueue m = fairq.add("M", new TestRunQueue("M", 2)); + + for (int i = 0; i < 3; ++i) { + assertEquals(a, fairq.poll()); + assertEquals(b, fairq.poll()); + assertEquals(m, fairq.poll()); + assertEquals(m, fairq.poll()); + } + } + + @Test + public void testFairQueuesNotAvailable() throws Exception { + ProcedureFairRunQueues fairq + = new ProcedureFairRunQueues(1); + TestRunQueue a = fairq.add("A", new TestRunQueue("A", 1)); + TestRunQueue b = fairq.add("B", new TestRunQueue("B", 1)); + TestRunQueue m = fairq.add("M", new TestRunQueue("M", 2)); + + // m is not available + m.setAvailable(false); + for (int i = 0; i < 3; ++i) { + assertEquals(a, fairq.poll()); + assertEquals(b, fairq.poll()); + } + + // m is available + m.setAvailable(true); + for (int i = 0; i < 3; ++i) { + assertEquals(m, fairq.poll()); + assertEquals(m, fairq.poll()); + assertEquals(a, fairq.poll()); + assertEquals(b, fairq.poll()); + } + + // b is not available + b.setAvailable(false); + for (int i = 0; i < 3; ++i) { + assertEquals(m, fairq.poll()); + assertEquals(m, fairq.poll()); + assertEquals(a, fairq.poll()); + } + + assertEquals(m, fairq.poll()); + m.setAvailable(false); + // m should be fetched next, but is no longer available + assertEquals(a, fairq.poll()); + assertEquals(a, fairq.poll()); + b.setAvailable(true); + for (int i = 0; i < 3; ++i) { + assertEquals(b, fairq.poll()); + assertEquals(a, fairq.poll()); + } + } + + @Test + public void testFairQueuesDelete() throws Exception { + ProcedureFairRunQueues fairq + = new ProcedureFairRunQueues(1); + TestRunQueue a = fairq.add("A", new TestRunQueue("A", 1)); + TestRunQueue b = fairq.add("B", new TestRunQueue("B", 1)); 
+ TestRunQueue m = fairq.add("M", new TestRunQueue("M", 2)); + + // Fetch A and then remove it + assertEquals(a, fairq.poll()); + assertEquals(a, fairq.remove("A")); + + // Fetch B and then remove it + assertEquals(b, fairq.poll()); + assertEquals(b, fairq.remove("B")); + + // Fetch M and then remove it + assertEquals(m, fairq.poll()); + assertEquals(m, fairq.remove("M")); + + // nothing left + assertEquals(null, fairq.poll()); + } +} \ No newline at end of file diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java new file mode 100644 index 00000000000..0b7395b2bef --- /dev/null +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java @@ -0,0 +1,488 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseCommonTestingUtility; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Threads; + +import org.junit.After; +import org.junit.Before; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, SmallTests.class}) +public class TestProcedureRecovery { + private static final Log LOG = LogFactory.getLog(TestProcedureRecovery.class); + + private static final int PROCEDURE_EXECUTOR_SLOTS = 1; + private static final Procedure NULL_PROC = null; + + private static ProcedureExecutor procExecutor; + private static ProcedureStore procStore; + private static int procSleepInterval; + + private HBaseCommonTestingUtility htu; + private FileSystem fs; + private Path testDir; + private Path logDir; + + @Before + public void setUp() throws IOException { + htu = new HBaseCommonTestingUtility(); + testDir = htu.getDataTestDir(); + fs = testDir.getFileSystem(htu.getConfiguration()); + assertTrue(testDir.depth() > 1); + + logDir = 
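The poll sequences asserted above encode the scheduling contract: a queue's priority is the number of consecutive polls it receives per round, and queues whose isAvailable() returns false are skipped until they become available again. A rough sketch, where the <key, queue> type parameters are assumed from how the tests construct the class:

    ProcedureFairRunQueues<String, TestRunQueue> fairq =
        new ProcedureFairRunQueues<String, TestRunQueue>(1);
    fairq.add("low",  new TestRunQueue("low", 1));    // polled once per round
    fairq.add("high", new TestRunQueue("high", 2));   // polled twice per round

    fairq.poll();   // low
    fairq.poll();   // high
    fairq.poll();   // high
    fairq.poll();   // low  (next round starts)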
new Path(testDir, "proc-logs"); + procStore = ProcedureTestingUtility.createStore(htu.getConfiguration(), fs, logDir); + procExecutor = new ProcedureExecutor(htu.getConfiguration(), null, procStore); + procExecutor.testing = new ProcedureExecutor.Testing(); + procStore.start(PROCEDURE_EXECUTOR_SLOTS); + procExecutor.start(PROCEDURE_EXECUTOR_SLOTS); + procSleepInterval = 0; + } + + @After + public void tearDown() throws IOException { + procExecutor.stop(); + procStore.stop(false); + fs.delete(logDir, true); + } + + private void restart() throws Exception { + dumpLogDirState(); + ProcedureTestingUtility.restart(procExecutor); + dumpLogDirState(); + } + + public static class TestSingleStepProcedure extends SequentialProcedure { + private int step = 0; + + public TestSingleStepProcedure() { } + + @Override + protected Procedure[] execute(Void env) { + LOG.debug("execute procedure " + this + " step=" + step); + step++; + setResult(Bytes.toBytes(step)); + return null; + } + + @Override + protected void rollback(Void env) { } + + @Override + protected boolean abort(Void env) { return true; } + } + + public static class BaseTestStepProcedure extends SequentialProcedure { + private AtomicBoolean abort = new AtomicBoolean(false); + private int step = 0; + + @Override + protected Procedure[] execute(Void env) { + LOG.debug("execute procedure " + this + " step=" + step); + ProcedureTestingUtility.toggleKillBeforeStoreUpdate(procExecutor); + step++; + Threads.sleepWithoutInterrupt(procSleepInterval); + if (isAborted()) { + setFailure(new RemoteProcedureException(getClass().getName(), + new ProcedureAbortedException( + "got an abort at " + getClass().getName() + " step=" + step))); + return null; + } + return null; + } + + @Override + protected void rollback(Void env) { + LOG.debug("rollback procedure " + this + " step=" + step); + ProcedureTestingUtility.toggleKillBeforeStoreUpdate(procExecutor); + step++; + } + + @Override + protected boolean abort(Void env) { + abort.set(true); + return true; + } + + private boolean isAborted() { + boolean aborted = abort.get(); + BaseTestStepProcedure proc = this; + while (proc.hasParent() && !aborted) { + proc = (BaseTestStepProcedure)procExecutor.getProcedure(proc.getParentProcId()); + aborted = proc.isAborted(); + } + return aborted; + } + } + + public static class TestMultiStepProcedure extends BaseTestStepProcedure { + public TestMultiStepProcedure() { } + + @Override + public Procedure[] execute(Void env) { + super.execute(env); + return isFailed() ? null : new Procedure[] { new Step1Procedure() }; + } + + public static class Step1Procedure extends BaseTestStepProcedure { + public Step1Procedure() { } + + @Override + protected Procedure[] execute(Void env) { + super.execute(env); + return isFailed() ? 
null : new Procedure[] { new Step2Procedure() }; + } + } + + public static class Step2Procedure extends BaseTestStepProcedure { + public Step2Procedure() { } + } + } + + @Test + public void testNoopLoad() throws Exception { + restart(); + } + + @Test(timeout=30000) + public void testSingleStepProcRecovery() throws Exception { + Procedure proc = new TestSingleStepProcedure(); + procExecutor.testing.killBeforeStoreUpdate = true; + long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc); + assertFalse(procExecutor.isRunning()); + procExecutor.testing.killBeforeStoreUpdate = false; + + // Restart and verify that the procedures restart + long restartTs = EnvironmentEdgeManager.currentTime(); + restart(); + waitProcedure(procId); + ProcedureResult result = procExecutor.getResult(procId); + assertTrue(result.getLastUpdate() > restartTs); + ProcedureTestingUtility.assertProcNotFailed(result); + assertEquals(1, Bytes.toInt(result.getResult())); + long resultTs = result.getLastUpdate(); + + // Verify that after another restart the result is still there + restart(); + result = procExecutor.getResult(procId); + ProcedureTestingUtility.assertProcNotFailed(result); + assertEquals(resultTs, result.getLastUpdate()); + assertEquals(1, Bytes.toInt(result.getResult())); + } + + @Test(timeout=30000) + public void testMultiStepProcRecovery() throws Exception { + // Step 0 - kill + Procedure proc = new TestMultiStepProcedure(); + long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc); + assertFalse(procExecutor.isRunning()); + + // Step 0 exec && Step 1 - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 1 exec && step 2 - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 2 exec + restart(); + waitProcedure(procId); + assertTrue(procExecutor.isRunning()); + + // The procedure is completed + ProcedureResult result = procExecutor.getResult(procId); + ProcedureTestingUtility.assertProcNotFailed(result); + } + + @Test(timeout=30000) + public void testMultiStepRollbackRecovery() throws Exception { + // Step 0 - kill + Procedure proc = new TestMultiStepProcedure(); + long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc); + assertFalse(procExecutor.isRunning()); + + // Step 0 exec && Step 1 - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 1 exec && step 2 - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 2 exec - rollback - kill + procSleepInterval = 2500; + restart(); + assertTrue(procExecutor.abort(procId)); + waitProcedure(procId); + assertFalse(procExecutor.isRunning()); + + // rollback - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // rollback - complete + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Restart the executor and get the result + restart(); + waitProcedure(procId); + + // The procedure is completed + ProcedureResult result = 
procExecutor.getResult(procId); + ProcedureTestingUtility.assertIsAbortException(result); + } + + public static class TestStateMachineProcedure + extends StateMachineProcedure { + enum State { STATE_1, STATE_2, STATE_3, DONE } + + public TestStateMachineProcedure() {} + + private AtomicBoolean aborted = new AtomicBoolean(false); + private int iResult = 0; + + @Override + protected StateMachineProcedure.Flow executeFromState(Void env, State state) { + switch (state) { + case STATE_1: + LOG.info("execute step 1 " + this); + setNextState(State.STATE_2); + iResult += 3; + break; + case STATE_2: + LOG.info("execute step 2 " + this); + setNextState(State.STATE_3); + iResult += 5; + break; + case STATE_3: + LOG.info("execute step 3 " + this); + Threads.sleepWithoutInterrupt(procSleepInterval); + if (aborted.get()) { + LOG.info("aborted step 3 " + this); + setAbortFailure("test", "aborted"); + break; + } + setNextState(State.DONE); + iResult += 7; + setResult(Bytes.toBytes(iResult)); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException(); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(Void env, final State state) { + switch (state) { + case STATE_1: + LOG.info("rollback step 1 " + this); + break; + case STATE_2: + LOG.info("rollback step 2 " + this); + break; + case STATE_3: + LOG.info("rollback step 3 " + this); + break; + default: + throw new UnsupportedOperationException(); + } + } + + @Override + protected State getState(final int stateId) { + return State.values()[stateId]; + } + + @Override + protected int getStateId(final State state) { + return state.ordinal(); + } + + @Override + protected State getInitialState() { + return State.STATE_1; + } + + @Override + protected boolean abort(Void env) { + aborted.set(true); + return true; + } + + @Override + protected void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + stream.write(Bytes.toBytes(iResult)); + } + + @Override + protected void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + byte[] data = new byte[4]; + stream.read(data); + iResult = Bytes.toInt(data); + } + } + + @Test(timeout=30000) + public void testStateMachineRecovery() throws Exception { + ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExecutor, true); + ProcedureTestingUtility.setKillBeforeStoreUpdate(procExecutor, true); + + // Step 1 - kill + Procedure proc = new TestStateMachineProcedure(); + long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc); + assertFalse(procExecutor.isRunning()); + + // Step 1 exec && Step 2 - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 2 exec && step 3 - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 3 exec + restart(); + waitProcedure(procId); + assertTrue(procExecutor.isRunning()); + + // The procedure is completed + ProcedureResult result = procExecutor.getResult(procId); + ProcedureTestingUtility.assertProcNotFailed(result); + assertEquals(15, Bytes.toInt(result.getResult())); + } + + @Test(timeout=30000) + public void testStateMachineRollbackRecovery() throws Exception { + ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExecutor, true); + 
ProcedureTestingUtility.setKillBeforeStoreUpdate(procExecutor, true); + + // Step 1 - kill + Procedure proc = new TestStateMachineProcedure(); + long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 1 exec && Step 2 - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 2 exec && step 3 - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 3 exec - rollback step 3 - kill + procSleepInterval = 2500; + restart(); + assertTrue(procExecutor.abort(procId)); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Rollback step 3 - rollback step 2 - kill + restart(); + waitProcedure(procId); + assertFalse(procExecutor.isRunning()); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + + // Rollback step 2 - step 1 - kill + restart(); + waitProcedure(procId); + assertFalse(procExecutor.isRunning()); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + + // Rollback step 1 - complete + restart(); + waitProcedure(procId); + assertTrue(procExecutor.isRunning()); + + // The procedure is completed + ProcedureResult result = procExecutor.getResult(procId); + ProcedureTestingUtility.assertIsAbortException(result); + } + + private void waitProcedure(final long procId) { + ProcedureTestingUtility.waitProcedure(procExecutor, procId); + dumpLogDirState(); + } + + private void dumpLogDirState() { + try { + FileStatus[] files = fs.listStatus(logDir); + if (files != null && files.length > 0) { + for (FileStatus file: files) { + assertTrue(file.toString(), file.isFile()); + LOG.debug("log file " + file.getPath() + " size=" + file.getLen()); + } + } else { + LOG.debug("no files under: " + logDir); + } + } catch (IOException e) { + LOG.warn("Unable to dump " + logDir, e); + } + } +} \ No newline at end of file diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java new file mode 100644 index 00000000000..88645ed7ce6 --- /dev/null +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java @@ -0,0 +1,226 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
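The recovery tests above all follow the same kill/replay pattern; distilled into one place (using only the ProcedureTestingUtility helpers introduced earlier, inside a test with the same static imports), the idiom is roughly:

    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExecutor, true);
    long procId = ProcedureTestingUtility.submitAndWait(procExecutor, new TestMultiStepProcedure());

    while (!procExecutor.isFinished(procId)) {
      assertFalse(procExecutor.isRunning());           // the executor "crashed" at a store update
      ProcedureTestingUtility.restart(procExecutor);   // replay the WAL and resume
      ProcedureTestingUtility.waitProcedure(procExecutor, procId);
    }
    ProcedureTestingUtility.assertProcNotFailed(procExecutor, procId);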
+ */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; +import java.util.ArrayList; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseCommonTestingUtility; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, LargeTests.class}) +public class TestProcedureReplayOrder { + private static final Log LOG = LogFactory.getLog(TestProcedureReplayOrder.class); + + private static final Procedure NULL_PROC = null; + + private ProcedureExecutor procExecutor; + private TestProcedureEnv procEnv; + private ProcedureStore procStore; + + private HBaseCommonTestingUtility htu; + private FileSystem fs; + private Path testDir; + private Path logDir; + + @Before + public void setUp() throws IOException { + htu = new HBaseCommonTestingUtility(); + htu.getConfiguration().setInt("hbase.procedure.store.wal.sync.wait.msec", 10); + + testDir = htu.getDataTestDir(); + fs = testDir.getFileSystem(htu.getConfiguration()); + assertTrue(testDir.depth() > 1); + + logDir = new Path(testDir, "proc-logs"); + procEnv = new TestProcedureEnv(); + procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), fs, logDir); + procExecutor = new ProcedureExecutor(htu.getConfiguration(), procEnv, procStore); + procStore.start(24); + procExecutor.start(1); + } + + @After + public void tearDown() throws IOException { + procExecutor.stop(); + procStore.stop(false); + fs.delete(logDir, true); + } + + @Test(timeout=90000) + public void testSingleStepReplyOrder() throws Exception { + // avoid the procedure to be runnable + procEnv.setAcquireLock(false); + + // submit the procedures + submitProcedures(16, 25, TestSingleStepProcedure.class); + + // restart the executor and allow the procedures to run + ProcedureTestingUtility.restart(procExecutor, new Runnable() { + @Override + public void run() { + procEnv.setAcquireLock(true); + } + }); + + // wait the execution of all the procedures and + // assert that the execution order was sorted by procId + ProcedureTestingUtility.waitNoProcedureRunning(procExecutor); + procEnv.assertSortedExecList(); + + // TODO: FIXME: This should be revisited + } + + @Ignore + @Test(timeout=90000) + public void testMultiStepReplyOrder() throws Exception { + // avoid the procedure to be runnable + procEnv.setAcquireLock(false); + + // submit the procedures + submitProcedures(16, 10, TestTwoStepProcedure.class); + + // restart the executor and allow the procedures to run + ProcedureTestingUtility.restart(procExecutor, new Runnable() { + @Override + public void run() { + procEnv.setAcquireLock(true); + } + }); + + fail("TODO: FIXME: NOT IMPLEMENT REPLAY ORDER"); + } + + private void submitProcedures(final int nthreads, final int nprocPerThread, + final Class procClazz) throws Exception { + Thread[] submitThreads = new Thread[nthreads]; + for (int i = 0; i < submitThreads.length; ++i) { + submitThreads[i] = new 
Thread() { + @Override + public void run() { + for (int i = 0; i < nprocPerThread; ++i) { + try { + procExecutor.submitProcedure((Procedure)procClazz.newInstance()); + } catch (InstantiationException|IllegalAccessException e) { + LOG.error("unable to instantiate the procedure", e); + fail("failure during the proc.newInstance(): " + e.getMessage()); + } + } + } + }; + } + + for (int i = 0; i < submitThreads.length; ++i) { + submitThreads[i].start(); + } + + for (int i = 0; i < submitThreads.length; ++i) { + submitThreads[i].join(); + } + } + + private static class TestProcedureEnv { + private ArrayList execList = new ArrayList(); + private boolean acquireLock = true; + + public void setAcquireLock(boolean acquireLock) { + this.acquireLock = acquireLock; + } + + public boolean canAcquireLock() { + return acquireLock; + } + + public void addToExecList(final Procedure proc) { + execList.add(proc.getProcId()); + } + + public ArrayList getExecList() { + return execList; + } + + public void assertSortedExecList() { + LOG.debug("EXEC LIST: " + execList); + for (int i = 1; i < execList.size(); ++i) { + assertTrue("exec list not sorted: " + execList.get(i-1) + " >= " + execList.get(i), + execList.get(i-1) < execList.get(i)); + } + } + } + + public static class TestSingleStepProcedure extends SequentialProcedure { + public TestSingleStepProcedure() { } + + @Override + protected Procedure[] execute(TestProcedureEnv env) { + LOG.debug("execute procedure " + this); + env.addToExecList(this); + return null; + } + + protected boolean acquireLock(final TestProcedureEnv env) { + return env.canAcquireLock(); + } + + @Override + protected void rollback(TestProcedureEnv env) { } + + @Override + protected boolean abort(TestProcedureEnv env) { return true; } + } + + public static class TestTwoStepProcedure extends SequentialProcedure { + public TestTwoStepProcedure() { } + + @Override + protected Procedure[] execute(TestProcedureEnv env) { + LOG.debug("execute procedure " + this); + env.addToExecList(this); + return new Procedure[] { new TestSingleStepProcedure() }; + } + + protected boolean acquireLock(final TestProcedureEnv env) { + return true; + } + + @Override + protected void rollback(TestProcedureEnv env) { } + + @Override + protected boolean abort(TestProcedureEnv env) { return true; } + } +} \ No newline at end of file diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java new file mode 100644 index 00000000000..be759dcc6e3 --- /dev/null +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java @@ -0,0 +1,193 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2.store; + +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Random; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, SmallTests.class}) +public class TestProcedureStoreTracker { + private static final Log LOG = LogFactory.getLog(TestProcedureStoreTracker.class); + + static class TestProcedure extends Procedure { + public TestProcedure(long procId) { + setProcId(procId); + } + + @Override + protected Procedure[] execute(Void env) { return null; } + + @Override + protected void rollback(Void env) { /* no-op */ } + + @Override + protected boolean abort(Void env) { return false; } + + @Override + protected void serializeStateData(final OutputStream stream) { /* no-op */ } + + @Override + protected void deserializeStateData(final InputStream stream) { /* no-op */ } + } + + @Test + public void testSeqInsertAndDelete() { + ProcedureStoreTracker tracker = new ProcedureStoreTracker(); + assertTrue(tracker.isEmpty()); + + final int MIN_PROC = 1; + final int MAX_PROC = 1 << 10; + + // sequential insert + for (int i = MIN_PROC; i < MAX_PROC; ++i) { + tracker.insert(i); + + // All the proc that we inserted should not be deleted + for (int j = MIN_PROC; j <= i; ++j) { + assertEquals(ProcedureStoreTracker.DeleteState.NO, tracker.isDeleted(j)); + } + // All the proc that are not yet inserted should be result as deleted + for (int j = i + 1; j < MAX_PROC; ++j) { + assertTrue(tracker.isDeleted(j) != ProcedureStoreTracker.DeleteState.NO); + } + } + + // sequential delete + for (int i = MIN_PROC; i < MAX_PROC; ++i) { + tracker.delete(i); + + // All the proc that we deleted should be deleted + for (int j = MIN_PROC; j <= i; ++j) { + assertEquals(ProcedureStoreTracker.DeleteState.YES, tracker.isDeleted(j)); + } + // All the proc that are not yet deleted should be result as not deleted + for (int j = i + 1; j < MAX_PROC; ++j) { + assertEquals(ProcedureStoreTracker.DeleteState.NO, tracker.isDeleted(j)); + } + } + assertTrue(tracker.isEmpty()); + } + + @Test + public void testPartialTracker() { + ProcedureStoreTracker tracker = new ProcedureStoreTracker(); + tracker.setPartialFlag(true); + + // nothing in the tracker, the state is unknown + assertTrue(tracker.isEmpty()); + assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(1)); + assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(579)); + + // Mark 1 as deleted, now that is a known state + tracker.setDeleted(1, true); + tracker.dump(); + assertEquals(ProcedureStoreTracker.DeleteState.YES, tracker.isDeleted(1)); + assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(2)); + assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(579)); + + // Mark 579 as non-deleted, now that is a known state + tracker.setDeleted(579, false); + assertEquals(ProcedureStoreTracker.DeleteState.YES, 
tracker.isDeleted(1)); + assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(2)); + assertEquals(ProcedureStoreTracker.DeleteState.NO, tracker.isDeleted(579)); + assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(577)); + assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(580)); + } + + @Test + public void testBasicCRUD() { + ProcedureStoreTracker tracker = new ProcedureStoreTracker(); + assertTrue(tracker.isEmpty()); + + Procedure[] procs = new TestProcedure[] { + new TestProcedure(1), new TestProcedure(2), new TestProcedure(3), + new TestProcedure(4), new TestProcedure(5), new TestProcedure(6), + }; + + tracker.insert(procs[0], null); + tracker.insert(procs[1], new Procedure[] { procs[2], procs[3], procs[4] }); + assertFalse(tracker.isEmpty()); + assertTrue(tracker.isUpdated()); + + tracker.resetUpdates(); + assertFalse(tracker.isUpdated()); + + for (int i = 0; i < 4; ++i) { + tracker.update(procs[i]); + assertFalse(tracker.isEmpty()); + assertFalse(tracker.isUpdated()); + } + + tracker.update(procs[4]); + assertFalse(tracker.isEmpty()); + assertTrue(tracker.isUpdated()); + + tracker.update(procs[5]); + assertFalse(tracker.isEmpty()); + assertTrue(tracker.isUpdated()); + + for (int i = 0; i < 5; ++i) { + tracker.delete(procs[i].getProcId()); + assertFalse(tracker.isEmpty()); + assertTrue(tracker.isUpdated()); + } + tracker.delete(procs[5].getProcId()); + assertTrue(tracker.isEmpty()); + } + + @Test + public void testRandLoad() { + final int NPROCEDURES = 2500; + final int NRUNS = 5000; + + final ProcedureStoreTracker tracker = new ProcedureStoreTracker(); + + Random rand = new Random(1); + for (int i = 0; i < NRUNS; ++i) { + assertTrue(tracker.isEmpty()); + + int count = 0; + while (count < NPROCEDURES) { + long procId = rand.nextLong(); + if (procId < 1) continue; + + tracker.setDeleted(procId, i % 2 == 0); + count++; + } + + tracker.clear(); + } + } +} diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java new file mode 100644 index 00000000000..344b28b85a9 --- /dev/null +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java @@ -0,0 +1,267 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
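testPartialTracker above pins down the three-valued answer a partial tracker gives; compressed into a few lines (the rationale, presumably, is that a tracker rebuilt from only some of the logs must not claim certainty about procedures it has never seen):

    ProcedureStoreTracker tracker = new ProcedureStoreTracker();
    tracker.setPartialFlag(true);

    tracker.isDeleted(42);            // MAYBE - nothing is known about procId 42 yet
    tracker.setDeleted(42, true);
    tracker.isDeleted(42);            // YES   - explicitly marked as deleted
    tracker.setDeleted(43, false);
    tracker.isDeleted(43);            // NO    - explicitly marked as live
    tracker.isDeleted(44);            // MAYBE - still unknown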
+ */ + +package org.apache.hadoop.hbase.procedure2.store.wal; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Iterator; +import java.util.HashSet; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseCommonTestingUtility; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.procedure2.SequentialProcedure; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.IOUtils; + +import org.junit.After; +import org.junit.Before; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, SmallTests.class}) +public class TestWALProcedureStore { + private static final Log LOG = LogFactory.getLog(TestWALProcedureStore.class); + + private static final int PROCEDURE_STORE_SLOTS = 1; + private static final Procedure NULL_PROC = null; + + private WALProcedureStore procStore; + + private HBaseCommonTestingUtility htu; + private FileSystem fs; + private Path testDir; + private Path logDir; + + @Before + public void setUp() throws IOException { + htu = new HBaseCommonTestingUtility(); + testDir = htu.getDataTestDir(); + fs = testDir.getFileSystem(htu.getConfiguration()); + assertTrue(testDir.depth() > 1); + + logDir = new Path(testDir, "proc-logs"); + procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), fs, logDir); + procStore.start(PROCEDURE_STORE_SLOTS); + procStore.recoverLease(); + } + + @After + public void tearDown() throws IOException { + procStore.stop(false); + fs.delete(logDir, true); + } + + private Iterator storeRestart() throws Exception { + procStore.stop(false); + procStore.start(PROCEDURE_STORE_SLOTS); + procStore.recoverLease(); + return procStore.load(); + } + + @Test + public void testEmptyLogLoad() throws Exception { + Iterator loader = storeRestart(); + assertEquals(0, countProcedures(loader)); + } + + @Test + public void testLoad() throws Exception { + Set procIds = new HashSet<>(); + + // Insert something in the log + Procedure proc1 = new TestSequentialProcedure(); + procIds.add(proc1.getProcId()); + procStore.insert(proc1, null); + + Procedure proc2 = new TestSequentialProcedure(); + Procedure[] child2 = new Procedure[2]; + child2[0] = new TestSequentialProcedure(); + child2[1] = new TestSequentialProcedure(); + + procIds.add(proc2.getProcId()); + procIds.add(child2[0].getProcId()); + procIds.add(child2[1].getProcId()); + procStore.insert(proc2, child2); + + // Verify that everything is there + verifyProcIdsOnRestart(procIds); + + // Update and delete something + procStore.update(proc1); + procStore.update(child2[1]); + procStore.delete(child2[1].getProcId()); + procIds.remove(child2[1].getProcId()); + + // Verify that everything is there + verifyProcIdsOnRestart(procIds); + + // Remove 4 byte from the trailers + procStore.stop(false); + FileStatus[] logs = 
fs.listStatus(logDir); + assertEquals(3, logs.length); + for (int i = 0; i < logs.length; ++i) { + corruptLog(logs[i], 4); + } + verifyProcIdsOnRestart(procIds); + } + + @Test + public void testCorruptedTrailer() throws Exception { + // Insert something + for (int i = 0; i < 100; ++i) { + procStore.insert(new TestSequentialProcedure(), null); + } + + // Stop the store + procStore.stop(false); + + // Remove 4 byte from the trailer + FileStatus[] logs = fs.listStatus(logDir); + assertEquals(1, logs.length); + corruptLog(logs[0], 4); + + int count = countProcedures(storeRestart()); + assertEquals(100, count); + } + + @Test + public void testCorruptedEntries() throws Exception { + // Insert something + for (int i = 0; i < 100; ++i) { + procStore.insert(new TestSequentialProcedure(), null); + } + + // Stop the store + procStore.stop(false); + + // Remove some byte from the log + // (enough to cut the trailer and corrupt some entries) + FileStatus[] logs = fs.listStatus(logDir); + assertEquals(1, logs.length); + corruptLog(logs[0], 1823); + + int count = countProcedures(storeRestart()); + assertTrue(procStore.getCorruptedLogs() != null); + assertEquals(1, procStore.getCorruptedLogs().size()); + assertEquals(85, count); + } + + private void corruptLog(final FileStatus logFile, final long dropBytes) + throws IOException { + assertTrue(logFile.getLen() > dropBytes); + LOG.debug("corrupt log " + logFile.getPath() + + " size=" + logFile.getLen() + " drop=" + dropBytes); + Path tmpPath = new Path(testDir, "corrupted.log"); + InputStream in = fs.open(logFile.getPath()); + OutputStream out = fs.create(tmpPath); + IOUtils.copyBytes(in, out, logFile.getLen() - dropBytes, true); + fs.rename(tmpPath, logFile.getPath()); + } + + private void verifyProcIdsOnRestart(final Set procIds) throws Exception { + int count = 0; + Iterator loader = storeRestart(); + while (loader.hasNext()) { + Procedure proc = loader.next(); + LOG.debug("loading procId=" + proc.getProcId()); + assertTrue("procId=" + proc.getProcId() + " unexpected", procIds.contains(proc.getProcId())); + count++; + } + assertEquals(procIds.size(), count); + } + + private void assertIsEmpty(Iterator iterator) { + assertEquals(0, countProcedures(iterator)); + } + + private int countProcedures(Iterator iterator) { + int count = 0; + while (iterator.hasNext()) { + Procedure proc = iterator.next(); + LOG.trace("loading procId=" + proc.getProcId()); + count++; + } + return count; + } + + private void assertEmptyLogDir() { + try { + FileStatus[] status = fs.listStatus(logDir); + assertTrue("expected empty state-log dir", status == null || status.length == 0); + } catch (FileNotFoundException e) { + fail("expected the state-log dir to be present: " + logDir); + } catch (IOException e) { + fail("got en exception on state-log dir list: " + e.getMessage()); + } + } + + public static class TestSequentialProcedure extends SequentialProcedure { + private static long seqid = 0; + + public TestSequentialProcedure() { + setProcId(++seqid); + } + + @Override + protected Procedure[] execute(Void env) { return null; } + + @Override + protected void rollback(Void env) { } + + @Override + protected boolean abort(Void env) { return false; } + + @Override + protected void serializeStateData(final OutputStream stream) throws IOException { + long procId = getProcId(); + if (procId % 2 == 0) { + stream.write(Bytes.toBytes(procId)); + } + } + + @Override + protected void deserializeStateData(InputStream stream) throws IOException { + long procId = getProcId(); + if (procId % 2 
== 0) { + byte[] bProcId = new byte[8]; + assertEquals(8, stream.read(bProcId)); + assertEquals(procId, Bytes.toLong(bProcId)); + } else { + assertEquals(0, stream.available()); + } + } + } +} \ No newline at end of file diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestTimeoutBlockingQueue.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestTimeoutBlockingQueue.java new file mode 100644 index 00000000000..aff536a6fde --- /dev/null +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestTimeoutBlockingQueue.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2.util; + + +import java.util.Arrays; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.procedure2.util.TimeoutBlockingQueue.TimeoutRetriever; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, SmallTests.class}) +public class TestTimeoutBlockingQueue { + private static final Log LOG = LogFactory.getLog(TestTimeoutBlockingQueue.class); + + static class TestObject { + private long timeout; + private int seqId; + + public TestObject(int seqId, long timeout) { + this.timeout = timeout; + this.seqId = seqId; + } + + public long getTimeout() { + return timeout; + } + + public String toString() { + return String.format("(%03d, %03d)", seqId, timeout); + } + } + + static class TestObjectTimeoutRetriever implements TimeoutRetriever { + @Override + public long getTimeout(TestObject obj) { + return obj.getTimeout(); + } + + @Override + public TimeUnit getTimeUnit(TestObject obj) { + return TimeUnit.MILLISECONDS; + } + } + + @Test + public void testOrder() { + TimeoutBlockingQueue queue = + new TimeoutBlockingQueue(8, new TestObjectTimeoutRetriever()); + + long[] timeouts = new long[] {500, 200, 700, 300, 600, 600, 200, 800, 500}; + + for (int i = 0; i < timeouts.length; ++i) { + for (int j = 0; j <= i; ++j) { + queue.add(new TestObject(j, timeouts[j])); + queue.dump(); + } + + long prev = 0; + for (int j = 0; j <= i; ++j) { + TestObject obj = queue.poll(); + assertTrue(obj.getTimeout() >= prev); + prev = obj.getTimeout(); + queue.dump(); + } + } + } + + @Test + public void testTimeoutBlockingQueue() { + TimeoutBlockingQueue queue; + + int[][] testArray 
= new int[][] { + {200, 400, 600}, // append + {200, 400, 100}, // prepend + {200, 400, 300}, // insert + }; + + for (int i = 0; i < testArray.length; ++i) { + int[] sortedArray = Arrays.copyOf(testArray[i], testArray[i].length); + Arrays.sort(sortedArray); + + // test with head == 0 + queue = new TimeoutBlockingQueue(2, new TestObjectTimeoutRetriever()); + for (int j = 0; j < testArray[i].length; ++j) { + queue.add(new TestObject(j, testArray[i][j])); + queue.dump(); + } + + for (int j = 0; !queue.isEmpty(); ++j) { + assertEquals(sortedArray[j], queue.poll().getTimeout()); + } + + queue = new TimeoutBlockingQueue(2, new TestObjectTimeoutRetriever()); + queue.add(new TestObject(0, 50)); + assertEquals(50, queue.poll().getTimeout()); + + // test with head > 0 + for (int j = 0; j < testArray[i].length; ++j) { + queue.add(new TestObject(j, testArray[i][j])); + queue.dump(); + } + + for (int j = 0; !queue.isEmpty(); ++j) { + assertEquals(sortedArray[j], queue.poll().getTimeout()); + } + } + } +} diff --git a/hbase-protocol/README.txt b/hbase-protocol/README.txt index b67f0c3ca59..e651411bf64 100644 --- a/hbase-protocol/README.txt +++ b/hbase-protocol/README.txt @@ -5,7 +5,7 @@ and then checked in. The reasoning is that they change infrequently. To regenerate the classes after making definition file changes, ensure first that the protobuf protoc tool is in your $PATH (You may need to download it and build it first; its part of the protobuf package obtainable from here: -http://code.google.com/p/protobuf/downloads/list). +https://github.com/google/protobuf/releases/tag/v2.5.0). HBase uses hadoop-maven-plugins:protoc goal to invoke the protoc command. You can compile the protoc definitions by invoking maven with profile compile-protobuf or diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml index 7787c52b43f..fb5e0abfbe4 100644 --- a/hbase-protocol/pom.xml +++ b/hbase-protocol/pom.xml @@ -175,7 +175,9 @@ LoadBalancer.proto MapReduce.proto Master.proto + MasterProcedure.proto MultiRowMutation.proto + Procedure.proto Quota.proto RegionServerStatus.proto RowProcessor.proto diff --git a/hbase-protocol/src/main/asciidoc/.gitignore b/hbase-protocol/src/main/asciidoc/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java index 382874289df..a76936d935f 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java @@ -6440,6 +6440,905 @@ public final class AdminProtos { // @@protoc_insertion_point(class_scope:OpenRegionResponse) } + public interface WarmupRegionRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .RegionInfo regionInfo = 1; + /** + * required .RegionInfo regionInfo = 1; + */ + boolean hasRegionInfo(); + /** + * required .RegionInfo regionInfo = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(); + /** + * required .RegionInfo regionInfo = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(); + } + /** + * Protobuf type {@code WarmupRegionRequest} + */ + public static final class WarmupRegionRequest extends + com.google.protobuf.GeneratedMessage + implements WarmupRegionRequestOrBuilder { + // Use 
WarmupRegionRequest.newBuilder() to construct. + private WarmupRegionRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private WarmupRegionRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final WarmupRegionRequest defaultInstance; + public static WarmupRegionRequest getDefaultInstance() { + return defaultInstance; + } + + public WarmupRegionRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private WarmupRegionRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = regionInfo_.toBuilder(); + } + regionInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(regionInfo_); + regionInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_WarmupRegionRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_WarmupRegionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public WarmupRegionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new WarmupRegionRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .RegionInfo regionInfo = 1; + public static final int REGIONINFO_FIELD_NUMBER = 1; + private 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_; + /** + * required .RegionInfo regionInfo = 1; + */ + public boolean hasRegionInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .RegionInfo regionInfo = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { + return regionInfo_; + } + /** + * required .RegionInfo regionInfo = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { + return regionInfo_; + } + + private void initFields() { + regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRegionInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!getRegionInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, regionInfo_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, regionInfo_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest) obj; + + boolean result = true; + result = result && (hasRegionInfo() == other.hasRegionInfo()); + if (hasRegionInfo()) { + result = result && getRegionInfo() + .equals(other.getRegionInfo()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRegionInfo()) { + hash = (37 * hash) + REGIONINFO_FIELD_NUMBER; + hash = (53 * hash) + getRegionInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code WarmupRegionRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_WarmupRegionRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_WarmupRegionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest.class, 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRegionInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (regionInfoBuilder_ == null) { + regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + } else { + regionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_WarmupRegionRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (regionInfoBuilder_ == null) { + result.regionInfo_ = regionInfo_; + } else { + result.regionInfo_ = regionInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest.getDefaultInstance()) return this; + if (other.hasRegionInfo()) { + mergeRegionInfo(other.getRegionInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRegionInfo()) { + + return false; + } + if (!getRegionInfo().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest parsedMessage = null; + try { + parsedMessage 
= PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .RegionInfo regionInfo = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_; + /** + * required .RegionInfo regionInfo = 1; + */ + public boolean hasRegionInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .RegionInfo regionInfo = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() { + if (regionInfoBuilder_ == null) { + return regionInfo_; + } else { + return regionInfoBuilder_.getMessage(); + } + } + /** + * required .RegionInfo regionInfo = 1; + */ + public Builder setRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + regionInfo_ = value; + onChanged(); + } else { + regionInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .RegionInfo regionInfo = 1; + */ + public Builder setRegionInfo( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + regionInfo_ = builderForValue.build(); + onChanged(); + } else { + regionInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .RegionInfo regionInfo = 1; + */ + public Builder mergeRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + regionInfo_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) { + regionInfo_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionInfo_).mergeFrom(value).buildPartial(); + } else { + regionInfo_ = value; + } + onChanged(); + } else { + regionInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .RegionInfo regionInfo = 1; + */ + public Builder clearRegionInfo() { + if (regionInfoBuilder_ == null) { + regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance(); + onChanged(); + } else { + regionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .RegionInfo regionInfo = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRegionInfoFieldBuilder().getBuilder(); + } + /** + * required .RegionInfo regionInfo = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() { + if (regionInfoBuilder_ != null) { + return 
regionInfoBuilder_.getMessageOrBuilder(); + } else { + return regionInfo_; + } + } + /** + * required .RegionInfo regionInfo = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionInfoFieldBuilder() { + if (regionInfoBuilder_ == null) { + regionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + regionInfo_, + getParentForChildren(), + isClean()); + regionInfo_ = null; + } + return regionInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:WarmupRegionRequest) + } + + static { + defaultInstance = new WarmupRegionRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:WarmupRegionRequest) + } + + public interface WarmupRegionResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code WarmupRegionResponse} + */ + public static final class WarmupRegionResponse extends + com.google.protobuf.GeneratedMessage + implements WarmupRegionResponseOrBuilder { + // Use WarmupRegionResponse.newBuilder() to construct. + private WarmupRegionResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private WarmupRegionResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final WarmupRegionResponse defaultInstance; + public static WarmupRegionResponse getDefaultInstance() { + return defaultInstance; + } + + public WarmupRegionResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private WarmupRegionResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_WarmupRegionResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_WarmupRegionResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public WarmupRegionResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new WarmupRegionResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse other = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code WarmupRegionResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_WarmupRegionResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_WarmupRegionResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.class, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + 
return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.internal_static_WarmupRegionResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse build() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:WarmupRegionResponse) + } + + static { + defaultInstance = new WarmupRegionResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:WarmupRegionResponse) + } + public interface CloseRegionRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -7996,6 +8895,24 @@ public final class AdminProtos { * optional uint64 if_older_than_ts = 2; */ long getIfOlderThanTs(); + + // optional bool write_flush_wal_marker = 3; + /** + * optional bool write_flush_wal_marker = 3; + * + *

+     * <pre>
+     * whether to write a marker to WAL even if not flushed
+     * </pre>
      + */ + boolean hasWriteFlushWalMarker(); + /** + * optional bool write_flush_wal_marker = 3; + * + *
+     * <pre>
+     * whether to write a marker to WAL even if not flushed
+     * </pre>
      + */ + boolean getWriteFlushWalMarker(); } /** * Protobuf type {@code FlushRegionRequest} @@ -8073,6 +8990,11 @@ public final class AdminProtos { ifOlderThanTs_ = input.readUInt64(); break; } + case 24: { + bitField0_ |= 0x00000004; + writeFlushWalMarker_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -8151,9 +9073,34 @@ public final class AdminProtos { return ifOlderThanTs_; } + // optional bool write_flush_wal_marker = 3; + public static final int WRITE_FLUSH_WAL_MARKER_FIELD_NUMBER = 3; + private boolean writeFlushWalMarker_; + /** + * optional bool write_flush_wal_marker = 3; + * + *
+     * <pre>
+     * whether to write a marker to WAL even if not flushed
+     * </pre>
      + */ + public boolean hasWriteFlushWalMarker() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bool write_flush_wal_marker = 3; + * + *
+     * <pre>
+     * whether to write a marker to WAL even if not flushed
+     * </pre>
      + */ + public boolean getWriteFlushWalMarker() { + return writeFlushWalMarker_; + } + private void initFields() { region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); ifOlderThanTs_ = 0L; + writeFlushWalMarker_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -8181,6 +9128,9 @@ public final class AdminProtos { if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeUInt64(2, ifOlderThanTs_); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBool(3, writeFlushWalMarker_); + } getUnknownFields().writeTo(output); } @@ -8198,6 +9148,10 @@ public final class AdminProtos { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(2, ifOlderThanTs_); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(3, writeFlushWalMarker_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -8231,6 +9185,11 @@ public final class AdminProtos { result = result && (getIfOlderThanTs() == other.getIfOlderThanTs()); } + result = result && (hasWriteFlushWalMarker() == other.hasWriteFlushWalMarker()); + if (hasWriteFlushWalMarker()) { + result = result && (getWriteFlushWalMarker() + == other.getWriteFlushWalMarker()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -8252,6 +9211,10 @@ public final class AdminProtos { hash = (37 * hash) + IF_OLDER_THAN_TS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getIfOlderThanTs()); } + if (hasWriteFlushWalMarker()) { + hash = (37 * hash) + WRITE_FLUSH_WAL_MARKER_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getWriteFlushWalMarker()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -8377,6 +9340,8 @@ public final class AdminProtos { bitField0_ = (bitField0_ & ~0x00000001); ifOlderThanTs_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); + writeFlushWalMarker_ = false; + bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -8417,6 +9382,10 @@ public final class AdminProtos { to_bitField0_ |= 0x00000002; } result.ifOlderThanTs_ = ifOlderThanTs_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.writeFlushWalMarker_ = writeFlushWalMarker_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -8439,6 +9408,9 @@ public final class AdminProtos { if (other.hasIfOlderThanTs()) { setIfOlderThanTs(other.getIfOlderThanTs()); } + if (other.hasWriteFlushWalMarker()) { + setWriteFlushWalMarker(other.getWriteFlushWalMarker()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -8624,6 +9596,55 @@ public final class AdminProtos { return this; } + // optional bool write_flush_wal_marker = 3; + private boolean writeFlushWalMarker_ ; + /** + * optional bool write_flush_wal_marker = 3; + * + *
+       * <pre>
+       * whether to write a marker to WAL even if not flushed
+       * </pre>
      + */ + public boolean hasWriteFlushWalMarker() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bool write_flush_wal_marker = 3; + * + *
+       * <pre>
+       * whether to write a marker to WAL even if not flushed
+       * </pre>
      + */ + public boolean getWriteFlushWalMarker() { + return writeFlushWalMarker_; + } + /** + * optional bool write_flush_wal_marker = 3; + * + *
+       * <pre>
+       * whether to write a marker to WAL even if not flushed
+       * </pre>
      + */ + public Builder setWriteFlushWalMarker(boolean value) { + bitField0_ |= 0x00000004; + writeFlushWalMarker_ = value; + onChanged(); + return this; + } + /** + * optional bool write_flush_wal_marker = 3; + * + *
+       * <pre>
+       * whether to write a marker to WAL even if not flushed
+       * </pre>
      + */ + public Builder clearWriteFlushWalMarker() { + bitField0_ = (bitField0_ & ~0x00000004); + writeFlushWalMarker_ = false; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:FlushRegionRequest) } @@ -8657,6 +9678,16 @@ public final class AdminProtos { * optional bool flushed = 2; */ boolean getFlushed(); + + // optional bool wrote_flush_wal_marker = 3; + /** + * optional bool wrote_flush_wal_marker = 3; + */ + boolean hasWroteFlushWalMarker(); + /** + * optional bool wrote_flush_wal_marker = 3; + */ + boolean getWroteFlushWalMarker(); } /** * Protobuf type {@code FlushRegionResponse} @@ -8719,6 +9750,11 @@ public final class AdminProtos { flushed_ = input.readBool(); break; } + case 24: { + bitField0_ |= 0x00000004; + wroteFlushWalMarker_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -8791,9 +9827,26 @@ public final class AdminProtos { return flushed_; } + // optional bool wrote_flush_wal_marker = 3; + public static final int WROTE_FLUSH_WAL_MARKER_FIELD_NUMBER = 3; + private boolean wroteFlushWalMarker_; + /** + * optional bool wrote_flush_wal_marker = 3; + */ + public boolean hasWroteFlushWalMarker() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bool wrote_flush_wal_marker = 3; + */ + public boolean getWroteFlushWalMarker() { + return wroteFlushWalMarker_; + } + private void initFields() { lastFlushTime_ = 0L; flushed_ = false; + wroteFlushWalMarker_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -8817,6 +9870,9 @@ public final class AdminProtos { if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBool(2, flushed_); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBool(3, wroteFlushWalMarker_); + } getUnknownFields().writeTo(output); } @@ -8834,6 +9890,10 @@ public final class AdminProtos { size += com.google.protobuf.CodedOutputStream .computeBoolSize(2, flushed_); } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(3, wroteFlushWalMarker_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -8867,6 +9927,11 @@ public final class AdminProtos { result = result && (getFlushed() == other.getFlushed()); } + result = result && (hasWroteFlushWalMarker() == other.hasWroteFlushWalMarker()); + if (hasWroteFlushWalMarker()) { + result = result && (getWroteFlushWalMarker() + == other.getWroteFlushWalMarker()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -8888,6 +9953,10 @@ public final class AdminProtos { hash = (37 * hash) + FLUSHED_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getFlushed()); } + if (hasWroteFlushWalMarker()) { + hash = (37 * hash) + WROTE_FLUSH_WAL_MARKER_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getWroteFlushWalMarker()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -9001,6 +10070,8 @@ public final class AdminProtos { bitField0_ = (bitField0_ & ~0x00000001); flushed_ = false; bitField0_ = (bitField0_ & ~0x00000002); + wroteFlushWalMarker_ = false; + bitField0_ = (bitField0_ & ~0x00000004); return this; } @@ -9037,6 +10108,10 @@ public final class AdminProtos { to_bitField0_ |= 0x00000002; } result.flushed_ = flushed_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.wroteFlushWalMarker_ = 
wroteFlushWalMarker_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -9059,6 +10134,9 @@ public final class AdminProtos { if (other.hasFlushed()) { setFlushed(other.getFlushed()); } + if (other.hasWroteFlushWalMarker()) { + setWroteFlushWalMarker(other.getWroteFlushWalMarker()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -9156,6 +10234,39 @@ public final class AdminProtos { return this; } + // optional bool wrote_flush_wal_marker = 3; + private boolean wroteFlushWalMarker_ ; + /** + * optional bool wrote_flush_wal_marker = 3; + */ + public boolean hasWroteFlushWalMarker() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional bool wrote_flush_wal_marker = 3; + */ + public boolean getWroteFlushWalMarker() { + return wroteFlushWalMarker_; + } + /** + * optional bool wrote_flush_wal_marker = 3; + */ + public Builder setWroteFlushWalMarker(boolean value) { + bitField0_ |= 0x00000004; + wroteFlushWalMarker_ = value; + onChanged(); + return this; + } + /** + * optional bool wrote_flush_wal_marker = 3; + */ + public Builder clearWroteFlushWalMarker() { + bitField0_ = (bitField0_ & ~0x00000004); + wroteFlushWalMarker_ = false; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:FlushRegionResponse) } @@ -20607,6 +21718,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc WarmupRegion(.WarmupRegionRequest) returns (.WarmupRegionResponse); + */ + public abstract void warmupRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest request, + com.google.protobuf.RpcCallback done); + /** * rpc CloseRegion(.CloseRegionRequest) returns (.CloseRegionResponse); */ @@ -20740,6 +21859,14 @@ public final class AdminProtos { impl.openRegion(controller, request, done); } + @java.lang.Override + public void warmupRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest request, + com.google.protobuf.RpcCallback done) { + impl.warmupRegion(controller, request, done); + } + @java.lang.Override public void closeRegion( com.google.protobuf.RpcController controller, @@ -20867,28 +21994,30 @@ public final class AdminProtos { case 3: return impl.openRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest)request); case 4: - return impl.closeRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest)request); + return impl.warmupRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest)request); case 5: - return impl.flushRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest)request); + return impl.closeRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest)request); case 6: - return impl.splitRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest)request); + return impl.flushRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest)request); case 7: - return impl.compactRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest)request); + return impl.splitRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest)request); case 
8: - return impl.mergeRegions(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest)request); + return impl.compactRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest)request); case 9: - return impl.replicateWALEntry(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); + return impl.mergeRegions(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest)request); case 10: - return impl.replay(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); + return impl.replicateWALEntry(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); case 11: - return impl.rollWALWriter(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest)request); + return impl.replay(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request); case 12: - return impl.getServerInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest)request); + return impl.rollWALWriter(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest)request); case 13: - return impl.stopServer(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest)request); + return impl.getServerInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest)request); case 14: - return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request); + return impl.stopServer(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest)request); case 15: + return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request); + case 16: return impl.updateConfiguration(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request); default: throw new java.lang.AssertionError("Can't get here."); @@ -20913,28 +22042,30 @@ public final class AdminProtos { case 3: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.getDefaultInstance(); case 4: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest.getDefaultInstance(); case 5: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); case 7: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance(); case 8: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); case 9: - return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance(); case 10: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 11: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); case 15: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); + case 16: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -20959,28 +22090,30 @@ public final class AdminProtos { case 3: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse.getDefaultInstance(); case 4: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance(); case 5: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); case 7: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(); case 8: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(); case 10: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 11: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); + return 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); case 15: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); + case 16: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -21022,6 +22155,14 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc WarmupRegion(.WarmupRegionRequest) returns (.WarmupRegionResponse); + */ + public abstract void warmupRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest request, + com.google.protobuf.RpcCallback done); + /** * rpc CloseRegion(.CloseRegionRequest) returns (.CloseRegionResponse); */ @@ -21161,61 +22302,66 @@ public final class AdminProtos { done)); return; case 4: + this.warmupRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 5: this.closeRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 5: + case 6: this.flushRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 6: + case 7: this.splitRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 7: + case 8: this.compactRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 8: + case 9: this.mergeRegions(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 9: + case 10: this.replicateWALEntry(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 10: + case 11: this.replay(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 11: + case 12: this.rollWALWriter(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 12: + case 13: this.getServerInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 13: + case 14: this.stopServer(controller, 
(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 14: + case 15: this.updateFavoredNodes(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 15: + case 16: this.updateConfiguration(controller, (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); @@ -21243,28 +22389,30 @@ public final class AdminProtos { case 3: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.getDefaultInstance(); case 4: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest.getDefaultInstance(); case 5: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest.getDefaultInstance(); case 7: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest.getDefaultInstance(); case 8: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance(); case 10: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 11: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance(); case 15: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance(); + case 16: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -21289,28 +22437,30 @@ public final class AdminProtos { case 3: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse.getDefaultInstance(); case 4: 
- return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance(); case 5: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(); case 6: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(); case 7: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(); case 8: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(); case 9: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(); case 10: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 11: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(); case 12: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(); case 13: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(); case 14: - return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(); case 15: + return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(); + case 16: return org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -21393,12 +22543,27 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse.getDefaultInstance())); } + public void warmupRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.class, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance())); + } + public void closeRegion( com.google.protobuf.RpcController controller, 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(4), + getDescriptor().getMethods().get(5), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance(), @@ -21413,7 +22578,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(5), + getDescriptor().getMethods().get(6), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance(), @@ -21428,7 +22593,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(6), + getDescriptor().getMethods().get(7), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance(), @@ -21443,7 +22608,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(7), + getDescriptor().getMethods().get(8), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance(), @@ -21458,7 +22623,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(8), + getDescriptor().getMethods().get(9), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(), @@ -21473,7 +22638,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(9), + getDescriptor().getMethods().get(10), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(), @@ -21488,7 +22653,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(10), + getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(), @@ -21503,7 +22668,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(11), + getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(), @@ -21518,7 +22683,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(12), + getDescriptor().getMethods().get(13), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(), @@ 
-21533,7 +22698,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(13), + getDescriptor().getMethods().get(14), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(), @@ -21548,7 +22713,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(14), + getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(), @@ -21563,7 +22728,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(15), + getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(), @@ -21600,6 +22765,11 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest request) throws com.google.protobuf.ServiceException; + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse warmupRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest request) + throws com.google.protobuf.ServiceException; + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse closeRegion( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest request) @@ -21716,12 +22886,24 @@ public final class AdminProtos { } + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse warmupRegion( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse.getDefaultInstance()); + } + + public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse closeRegion( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(4), + getDescriptor().getMethods().get(5), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse.getDefaultInstance()); @@ -21733,7 +22915,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(5), + getDescriptor().getMethods().get(6), controller, request, 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse.getDefaultInstance()); @@ -21745,7 +22927,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(6), + getDescriptor().getMethods().get(7), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse.getDefaultInstance()); @@ -21757,7 +22939,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(7), + getDescriptor().getMethods().get(8), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance()); @@ -21769,7 +22951,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(8), + getDescriptor().getMethods().get(9), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance()); @@ -21781,7 +22963,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(9), + getDescriptor().getMethods().get(10), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance()); @@ -21793,7 +22975,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(10), + getDescriptor().getMethods().get(11), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance()); @@ -21805,7 +22987,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(11), + getDescriptor().getMethods().get(12), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance()); @@ -21817,7 +22999,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(12), + getDescriptor().getMethods().get(13), controller, request, 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance()); @@ -21829,7 +23011,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(13), + getDescriptor().getMethods().get(14), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance()); @@ -21841,7 +23023,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(14), + getDescriptor().getMethods().get(15), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance()); @@ -21853,7 +23035,7 @@ public final class AdminProtos { org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(15), + getDescriptor().getMethods().get(16), controller, request, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance()); @@ -21909,6 +23091,16 @@ public final class AdminProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_OpenRegionResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_WarmupRegionRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_WarmupRegionRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_WarmupRegionResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_WarmupRegionResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_CloseRegionRequest_descriptor; private static @@ -22067,72 +23259,77 @@ public final class AdminProtos { "\ropening_state\030\001 \003(\0162&.OpenRegionRespons" + "e.RegionOpeningState\"H\n\022RegionOpeningSta" + "te\022\n\n\006OPENED\020\000\022\022\n\016ALREADY_OPENED\020\001\022\022\n\016FA" + - "ILED_OPENING\020\002\"\271\001\n\022CloseRegionRequest\022 \n" + + "ILED_OPENING\020\002\"6\n\023WarmupRegionRequest\022\037\n" + + "\nregionInfo\030\001 \002(\0132\013.RegionInfo\"\026\n\024Warmup" + + "RegionResponse\"\271\001\n\022CloseRegionRequest\022 \n" + "\006region\030\001 \002(\0132\020.RegionSpecifier\022\037\n\027versi" + "on_of_closing_node\030\002 \001(\r\022\036\n\020transition_i" + - "n_ZK\030\003 \001(\010:\004true\022\'\n\022destination_server\030\004" + + "n_ZK\030\003 \001(\010:\004true\022\'\n\022destination_server\030\004", " \001(\0132\013.ServerName\022\027\n\017serverStartCode\030\005 \001" + - "(\004\"%\n\023CloseRegionResponse\022\016\n\006closed\030\001 \002(", - "\010\"P\n\022FlushRegionRequest\022 \n\006region\030\001 \002(\0132" + + "(\004\"%\n\023CloseRegionResponse\022\016\n\006closed\030\001 \002(" + + 
"\010\"p\n\022FlushRegionRequest\022 \n\006region\030\001 \002(\0132" + "\020.RegionSpecifier\022\030\n\020if_older_than_ts\030\002 " + - "\001(\004\"?\n\023FlushRegionResponse\022\027\n\017last_flush" + - "_time\030\001 \002(\004\022\017\n\007flushed\030\002 \001(\010\"K\n\022SplitReg" + - "ionRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpeci" + - "fier\022\023\n\013split_point\030\002 \001(\014\"\025\n\023SplitRegion" + - "Response\"W\n\024CompactRegionRequest\022 \n\006regi" + - "on\030\001 \002(\0132\020.RegionSpecifier\022\r\n\005major\030\002 \001(" + - "\010\022\016\n\006family\030\003 \001(\014\"\027\n\025CompactRegionRespon" + - "se\"\262\001\n\031UpdateFavoredNodesRequest\022@\n\013upda", - "te_info\030\001 \003(\0132+.UpdateFavoredNodesReques" + - "t.RegionUpdateInfo\032S\n\020RegionUpdateInfo\022\033" + - "\n\006region\030\001 \002(\0132\013.RegionInfo\022\"\n\rfavored_n" + - "odes\030\002 \003(\0132\013.ServerName\".\n\032UpdateFavored" + - "NodesResponse\022\020\n\010response\030\001 \001(\r\"v\n\023Merge" + - "RegionsRequest\022\"\n\010region_a\030\001 \002(\0132\020.Regio" + - "nSpecifier\022\"\n\010region_b\030\002 \002(\0132\020.RegionSpe" + - "cifier\022\027\n\010forcible\030\003 \001(\010:\005false\"\026\n\024Merge" + - "RegionsResponse\"X\n\010WALEntry\022\024\n\003key\030\001 \002(\013" + - "2\007.WALKey\022\027\n\017key_value_bytes\030\002 \003(\014\022\035\n\025as", - "sociated_cell_count\030\003 \001(\005\"4\n\030ReplicateWA" + - "LEntryRequest\022\030\n\005entry\030\001 \003(\0132\t.WALEntry\"" + - "\033\n\031ReplicateWALEntryResponse\"\026\n\024RollWALW" + - "riterRequest\"0\n\025RollWALWriterResponse\022\027\n" + - "\017region_to_flush\030\001 \003(\014\"#\n\021StopServerRequ" + - "est\022\016\n\006reason\030\001 \002(\t\"\024\n\022StopServerRespons" + - "e\"\026\n\024GetServerInfoRequest\"B\n\nServerInfo\022" + - " \n\013server_name\030\001 \002(\0132\013.ServerName\022\022\n\nweb" + - "ui_port\030\002 \001(\r\"9\n\025GetServerInfoResponse\022 " + - "\n\013server_info\030\001 \002(\0132\013.ServerInfo\"\034\n\032Upda", - "teConfigurationRequest\"\035\n\033UpdateConfigur" + - "ationResponse2\230\010\n\014AdminService\022>\n\rGetReg" + - "ionInfo\022\025.GetRegionInfoRequest\032\026.GetRegi" + - "onInfoResponse\022;\n\014GetStoreFile\022\024.GetStor" + - "eFileRequest\032\025.GetStoreFileResponse\022D\n\017G" + - "etOnlineRegion\022\027.GetOnlineRegionRequest\032" + - "\030.GetOnlineRegionResponse\0225\n\nOpenRegion\022" + - "\022.OpenRegionRequest\032\023.OpenRegionResponse" + - "\0228\n\013CloseRegion\022\023.CloseRegionRequest\032\024.C" + - "loseRegionResponse\0228\n\013FlushRegion\022\023.Flus", - "hRegionRequest\032\024.FlushRegionResponse\0228\n\013" + - "SplitRegion\022\023.SplitRegionRequest\032\024.Split" + - "RegionResponse\022>\n\rCompactRegion\022\025.Compac" + - "tRegionRequest\032\026.CompactRegionResponse\022;" + - "\n\014MergeRegions\022\024.MergeRegionsRequest\032\025.M" + - "ergeRegionsResponse\022J\n\021ReplicateWALEntry" + - "\022\031.ReplicateWALEntryRequest\032\032.ReplicateW" + - "ALEntryResponse\022?\n\006Replay\022\031.ReplicateWAL" + - "EntryRequest\032\032.ReplicateWALEntryResponse" + - "\022>\n\rRollWALWriter\022\025.RollWALWriterRequest", - "\032\026.RollWALWriterResponse\022>\n\rGetServerInf" + - "o\022\025.GetServerInfoRequest\032\026.GetServerInfo" + - "Response\0225\n\nStopServer\022\022.StopServerReque" + - "st\032\023.StopServerResponse\022M\n\022UpdateFavored" + - 
"Nodes\022\032.UpdateFavoredNodesRequest\032\033.Upda" + - "teFavoredNodesResponse\022P\n\023UpdateConfigur" + - "ation\022\033.UpdateConfigurationRequest\032\034.Upd" + - "ateConfigurationResponseBA\n*org.apache.h" + - "adoop.hbase.protobuf.generatedB\013AdminPro" + - "tosH\001\210\001\001\240\001\001" + "\001(\004\022\036\n\026write_flush_wal_marker\030\003 \001(\010\"_\n\023F" + + "lushRegionResponse\022\027\n\017last_flush_time\030\001 " + + "\002(\004\022\017\n\007flushed\030\002 \001(\010\022\036\n\026wrote_flush_wal_" + + "marker\030\003 \001(\010\"K\n\022SplitRegionRequest\022 \n\006re" + + "gion\030\001 \002(\0132\020.RegionSpecifier\022\023\n\013split_po" + + "int\030\002 \001(\014\"\025\n\023SplitRegionResponse\"W\n\024Comp", + "actRegionRequest\022 \n\006region\030\001 \002(\0132\020.Regio" + + "nSpecifier\022\r\n\005major\030\002 \001(\010\022\016\n\006family\030\003 \001(" + + "\014\"\027\n\025CompactRegionResponse\"\262\001\n\031UpdateFav" + + "oredNodesRequest\022@\n\013update_info\030\001 \003(\0132+." + + "UpdateFavoredNodesRequest.RegionUpdateIn" + + "fo\032S\n\020RegionUpdateInfo\022\033\n\006region\030\001 \002(\0132\013" + + ".RegionInfo\022\"\n\rfavored_nodes\030\002 \003(\0132\013.Ser" + + "verName\".\n\032UpdateFavoredNodesResponse\022\020\n" + + "\010response\030\001 \001(\r\"v\n\023MergeRegionsRequest\022\"" + + "\n\010region_a\030\001 \002(\0132\020.RegionSpecifier\022\"\n\010re", + "gion_b\030\002 \002(\0132\020.RegionSpecifier\022\027\n\010forcib" + + "le\030\003 \001(\010:\005false\"\026\n\024MergeRegionsResponse\"" + + "X\n\010WALEntry\022\024\n\003key\030\001 \002(\0132\007.WALKey\022\027\n\017key" + + "_value_bytes\030\002 \003(\014\022\035\n\025associated_cell_co" + + "unt\030\003 \001(\005\"4\n\030ReplicateWALEntryRequest\022\030\n" + + "\005entry\030\001 \003(\0132\t.WALEntry\"\033\n\031ReplicateWALE" + + "ntryResponse\"\026\n\024RollWALWriterRequest\"0\n\025" + + "RollWALWriterResponse\022\027\n\017region_to_flush" + + "\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006reason\030\001 " + + "\002(\t\"\024\n\022StopServerResponse\"\026\n\024GetServerIn", + "foRequest\"B\n\nServerInfo\022 \n\013server_name\030\001" + + " \002(\0132\013.ServerName\022\022\n\nwebui_port\030\002 \001(\r\"9\n" + + "\025GetServerInfoResponse\022 \n\013server_info\030\001 " + + "\002(\0132\013.ServerInfo\"\034\n\032UpdateConfigurationR" + + "equest\"\035\n\033UpdateConfigurationResponse2\325\010" + + "\n\014AdminService\022>\n\rGetRegionInfo\022\025.GetReg" + + "ionInfoRequest\032\026.GetRegionInfoResponse\022;" + + "\n\014GetStoreFile\022\024.GetStoreFileRequest\032\025.G" + + "etStoreFileResponse\022D\n\017GetOnlineRegion\022\027" + + ".GetOnlineRegionRequest\032\030.GetOnlineRegio", + "nResponse\0225\n\nOpenRegion\022\022.OpenRegionRequ" + + "est\032\023.OpenRegionResponse\022;\n\014WarmupRegion" + + "\022\024.WarmupRegionRequest\032\025.WarmupRegionRes" + + "ponse\0228\n\013CloseRegion\022\023.CloseRegionReques" + + "t\032\024.CloseRegionResponse\0228\n\013FlushRegion\022\023" + + ".FlushRegionRequest\032\024.FlushRegionRespons" + + "e\0228\n\013SplitRegion\022\023.SplitRegionRequest\032\024." 
+ + "SplitRegionResponse\022>\n\rCompactRegion\022\025.C" + + "ompactRegionRequest\032\026.CompactRegionRespo" + + "nse\022;\n\014MergeRegions\022\024.MergeRegionsReques", + "t\032\025.MergeRegionsResponse\022J\n\021ReplicateWAL" + + "Entry\022\031.ReplicateWALEntryRequest\032\032.Repli" + + "cateWALEntryResponse\022?\n\006Replay\022\031.Replica" + + "teWALEntryRequest\032\032.ReplicateWALEntryRes" + + "ponse\022>\n\rRollWALWriter\022\025.RollWALWriterRe" + + "quest\032\026.RollWALWriterResponse\022>\n\rGetServ" + + "erInfo\022\025.GetServerInfoRequest\032\026.GetServe" + + "rInfoResponse\0225\n\nStopServer\022\022.StopServer" + + "Request\032\023.StopServerResponse\022M\n\022UpdateFa" + + "voredNodes\022\032.UpdateFavoredNodesRequest\032\033", + ".UpdateFavoredNodesResponse\022P\n\023UpdateCon" + + "figuration\022\033.UpdateConfigurationRequest\032" + + "\034.UpdateConfigurationResponseBA\n*org.apa" + + "che.hadoop.hbase.protobuf.generatedB\013Adm" + + "inProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -22193,56 +23390,68 @@ public final class AdminProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_OpenRegionResponse_descriptor, new java.lang.String[] { "OpeningState", }); - internal_static_CloseRegionRequest_descriptor = + internal_static_WarmupRegionRequest_descriptor = getDescriptor().getMessageTypes().get(8); + internal_static_WarmupRegionRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_WarmupRegionRequest_descriptor, + new java.lang.String[] { "RegionInfo", }); + internal_static_WarmupRegionResponse_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_WarmupRegionResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_WarmupRegionResponse_descriptor, + new java.lang.String[] { }); + internal_static_CloseRegionRequest_descriptor = + getDescriptor().getMessageTypes().get(10); internal_static_CloseRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CloseRegionRequest_descriptor, new java.lang.String[] { "Region", "VersionOfClosingNode", "TransitionInZK", "DestinationServer", "ServerStartCode", }); internal_static_CloseRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(11); internal_static_CloseRegionResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CloseRegionResponse_descriptor, new java.lang.String[] { "Closed", }); internal_static_FlushRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(12); internal_static_FlushRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_FlushRegionRequest_descriptor, - new java.lang.String[] { "Region", "IfOlderThanTs", }); + new java.lang.String[] { "Region", "IfOlderThanTs", "WriteFlushWalMarker", }); internal_static_FlushRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(13); internal_static_FlushRegionResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_FlushRegionResponse_descriptor, - new java.lang.String[] { 
"LastFlushTime", "Flushed", }); + new java.lang.String[] { "LastFlushTime", "Flushed", "WroteFlushWalMarker", }); internal_static_SplitRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(14); internal_static_SplitRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SplitRegionRequest_descriptor, new java.lang.String[] { "Region", "SplitPoint", }); internal_static_SplitRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(15); internal_static_SplitRegionResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SplitRegionResponse_descriptor, new java.lang.String[] { }); internal_static_CompactRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(16); internal_static_CompactRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CompactRegionRequest_descriptor, new java.lang.String[] { "Region", "Major", "Family", }); internal_static_CompactRegionResponse_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(17); internal_static_CompactRegionResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CompactRegionResponse_descriptor, new java.lang.String[] { }); internal_static_UpdateFavoredNodesRequest_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(18); internal_static_UpdateFavoredNodesRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_UpdateFavoredNodesRequest_descriptor, @@ -22254,91 +23463,91 @@ public final class AdminProtos { internal_static_UpdateFavoredNodesRequest_RegionUpdateInfo_descriptor, new java.lang.String[] { "Region", "FavoredNodes", }); internal_static_UpdateFavoredNodesResponse_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(19); internal_static_UpdateFavoredNodesResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_UpdateFavoredNodesResponse_descriptor, new java.lang.String[] { "Response", }); internal_static_MergeRegionsRequest_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(20); internal_static_MergeRegionsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MergeRegionsRequest_descriptor, new java.lang.String[] { "RegionA", "RegionB", "Forcible", }); internal_static_MergeRegionsResponse_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(21); internal_static_MergeRegionsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MergeRegionsResponse_descriptor, new java.lang.String[] { }); internal_static_WALEntry_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(22); internal_static_WALEntry_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_WALEntry_descriptor, new java.lang.String[] { "Key", "KeyValueBytes", "AssociatedCellCount", }); internal_static_ReplicateWALEntryRequest_descriptor = - getDescriptor().getMessageTypes().get(21); + 
getDescriptor().getMessageTypes().get(23); internal_static_ReplicateWALEntryRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicateWALEntryRequest_descriptor, new java.lang.String[] { "Entry", }); internal_static_ReplicateWALEntryResponse_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(24); internal_static_ReplicateWALEntryResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ReplicateWALEntryResponse_descriptor, new java.lang.String[] { }); internal_static_RollWALWriterRequest_descriptor = - getDescriptor().getMessageTypes().get(23); + getDescriptor().getMessageTypes().get(25); internal_static_RollWALWriterRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RollWALWriterRequest_descriptor, new java.lang.String[] { }); internal_static_RollWALWriterResponse_descriptor = - getDescriptor().getMessageTypes().get(24); + getDescriptor().getMessageTypes().get(26); internal_static_RollWALWriterResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RollWALWriterResponse_descriptor, new java.lang.String[] { "RegionToFlush", }); internal_static_StopServerRequest_descriptor = - getDescriptor().getMessageTypes().get(25); + getDescriptor().getMessageTypes().get(27); internal_static_StopServerRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_StopServerRequest_descriptor, new java.lang.String[] { "Reason", }); internal_static_StopServerResponse_descriptor = - getDescriptor().getMessageTypes().get(26); + getDescriptor().getMessageTypes().get(28); internal_static_StopServerResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_StopServerResponse_descriptor, new java.lang.String[] { }); internal_static_GetServerInfoRequest_descriptor = - getDescriptor().getMessageTypes().get(27); + getDescriptor().getMessageTypes().get(29); internal_static_GetServerInfoRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetServerInfoRequest_descriptor, new java.lang.String[] { }); internal_static_ServerInfo_descriptor = - getDescriptor().getMessageTypes().get(28); + getDescriptor().getMessageTypes().get(30); internal_static_ServerInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ServerInfo_descriptor, new java.lang.String[] { "ServerName", "WebuiPort", }); internal_static_GetServerInfoResponse_descriptor = - getDescriptor().getMessageTypes().get(29); + getDescriptor().getMessageTypes().get(31); internal_static_GetServerInfoResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetServerInfoResponse_descriptor, new java.lang.String[] { "ServerInfo", }); internal_static_UpdateConfigurationRequest_descriptor = - getDescriptor().getMessageTypes().get(30); + getDescriptor().getMessageTypes().get(32); internal_static_UpdateConfigurationRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_UpdateConfigurationRequest_descriptor, new java.lang.String[] { }); internal_static_UpdateConfigurationResponse_descriptor = - getDescriptor().getMessageTypes().get(31); + getDescriptor().getMessageTypes().get(33); 
internal_static_UpdateConfigurationResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_UpdateConfigurationResponse_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java index afd67a1cc53..60ab6515d8d 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java @@ -4247,6 +4247,30 @@ public final class ClientProtos { * */ boolean getStale(); + + // optional bool partial = 5 [default = false]; + /** + * optional bool partial = 5 [default = false]; + * + *
      +     * Whether or not the entire result could be returned. Results will be split when
      +     * the RPC chunk size limit is reached. Partial results contain only a subset of the
      +     * cells for a row and must be combined with a result containing the remaining cells
      +     * to form a complete result
      +     * 
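For illustration only (this sketch is not part of the patch), the contract described in the comment above can be exercised through the regenerated ClientProtos.Result accessors; the cells themselves are assumed to travel in the accompanying cellblock and are omitted here.

import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;

public class PartialResultFlagSketch {
  public static void main(String[] args) {
    // Server-side sketch: mark a Result whose row was cut off at the RPC
    // chunk size limit, so the client knows more cells for this row follow.
    ClientProtos.Result chunk = ClientProtos.Result.newBuilder()
        .setAssociatedCellCount(2) // cells are shipped separately in the cellblock
        .setPartial(true)          // only a subset of the row's cells fit here
        .build();

    // Client-side sketch: a partial Result must be held back and combined
    // with the Result(s) carrying the remaining cells before it is complete.
    if (chunk.hasPartial() && chunk.getPartial()) {
      System.out.println("row is incomplete; fetch and merge the next chunk");
    }
  }
}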
      + */ + boolean hasPartial(); + /** + * optional bool partial = 5 [default = false]; + * + *
      +     * Whether or not the entire result could be returned. Results will be split when
      +     * the RPC chunk size limit is reached. Partial results contain only a subset of the
      +     * cells for a row and must be combined with a result containing the remaining cells
      +     * to form a complete result
      +     * 
      + */ + boolean getPartial(); } /** * Protobuf type {@code Result} @@ -4322,6 +4346,11 @@ public final class ClientProtos { stale_ = input.readBool(); break; } + case 40: { + bitField0_ |= 0x00000008; + partial_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -4510,11 +4539,42 @@ public final class ClientProtos { return stale_; } + // optional bool partial = 5 [default = false]; + public static final int PARTIAL_FIELD_NUMBER = 5; + private boolean partial_; + /** + * optional bool partial = 5 [default = false]; + * + *
      +     * Whether or not the entire result could be returned. Results will be split when
      +     * the RPC chunk size limit is reached. Partial results contain only a subset of the
      +     * cells for a row and must be combined with a result containing the remaining cells
      +     * to form a complete result
      +     * 
      + */ + public boolean hasPartial() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bool partial = 5 [default = false]; + * + *
      +     * Whether or not the entire result could be returned. Results will be split when
      +     * the RPC chunk size limit is reached. Partial results contain only a subset of the
      +     * cells for a row and must be combined with a result containing the remaining cells
      +     * to form a complete result
      +     * 
      + */ + public boolean getPartial() { + return partial_; + } + private void initFields() { cell_ = java.util.Collections.emptyList(); associatedCellCount_ = 0; exists_ = false; stale_ = false; + partial_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -4540,6 +4600,9 @@ public final class ClientProtos { if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBool(4, stale_); } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBool(5, partial_); + } getUnknownFields().writeTo(output); } @@ -4565,6 +4628,10 @@ public final class ClientProtos { size += com.google.protobuf.CodedOutputStream .computeBoolSize(4, stale_); } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(5, partial_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -4605,6 +4672,11 @@ public final class ClientProtos { result = result && (getStale() == other.getStale()); } + result = result && (hasPartial() == other.hasPartial()); + if (hasPartial()) { + result = result && (getPartial() + == other.getPartial()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -4634,6 +4706,10 @@ public final class ClientProtos { hash = (37 * hash) + STALE_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getStale()); } + if (hasPartial()) { + hash = (37 * hash) + PARTIAL_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getPartial()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -4756,6 +4832,8 @@ public final class ClientProtos { bitField0_ = (bitField0_ & ~0x00000004); stale_ = false; bitField0_ = (bitField0_ & ~0x00000008); + partial_ = false; + bitField0_ = (bitField0_ & ~0x00000010); return this; } @@ -4805,6 +4883,10 @@ public final class ClientProtos { to_bitField0_ |= 0x00000004; } result.stale_ = stale_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000008; + } + result.partial_ = partial_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -4856,6 +4938,9 @@ public final class ClientProtos { if (other.hasStale()) { setStale(other.getStale()); } + if (other.hasPartial()) { + setPartial(other.getPartial()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -5384,6 +5469,67 @@ public final class ClientProtos { return this; } + // optional bool partial = 5 [default = false]; + private boolean partial_ ; + /** + * optional bool partial = 5 [default = false]; + * + *
      +       * Whether or not the entire result could be returned. Results will be split when
      +       * the RPC chunk size limit is reached. Partial results contain only a subset of the
      +       * cells for a row and must be combined with a result containing the remaining cells
      +       * to form a complete result
      +       * 
      + */ + public boolean hasPartial() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bool partial = 5 [default = false]; + * + *
      +       * Whether or not the entire result could be returned. Results will be split when
      +       * the RPC chunk size limit is reached. Partial results contain only a subset of the
      +       * cells for a row and must be combined with a result containing the remaining cells
      +       * to form a complete result
      +       * 
      + */ + public boolean getPartial() { + return partial_; + } + /** + * optional bool partial = 5 [default = false]; + * + *
      +       * Whether or not the entire result could be returned. Results will be split when
      +       * the RPC chunk size limit is reached. Partial results contain only a subset of the
      +       * cells for a row and must be combined with a result containing the remaining cells
      +       * to form a complete result
      +       * 
      + */ + public Builder setPartial(boolean value) { + bitField0_ |= 0x00000010; + partial_ = value; + onChanged(); + return this; + } + /** + * optional bool partial = 5 [default = false]; + * + *
      +       * Whether or not the entire result could be returned. Results will be split when
      +       * the RPC chunk size limit is reached. Partial results contain only a subset of the
      +       * cells for a row and must be combined with a result containing the remaining cells
      +       * to form a complete result
      +       * 
      + */ + public Builder clearPartial() { + bitField0_ = (bitField0_ & ~0x00000010); + partial_ = false; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:Result) } @@ -16277,6 +16423,16 @@ public final class ClientProtos { * optional uint64 next_call_seq = 6; */ long getNextCallSeq(); + + // optional bool client_handles_partials = 7; + /** + * optional bool client_handles_partials = 7; + */ + boolean hasClientHandlesPartials(); + /** + * optional bool client_handles_partials = 7; + */ + boolean getClientHandlesPartials(); } /** * Protobuf type {@code ScanRequest} @@ -16388,6 +16544,11 @@ public final class ClientProtos { nextCallSeq_ = input.readUInt64(); break; } + case 56: { + bitField0_ |= 0x00000040; + clientHandlesPartials_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -16536,6 +16697,22 @@ public final class ClientProtos { return nextCallSeq_; } + // optional bool client_handles_partials = 7; + public static final int CLIENT_HANDLES_PARTIALS_FIELD_NUMBER = 7; + private boolean clientHandlesPartials_; + /** + * optional bool client_handles_partials = 7; + */ + public boolean hasClientHandlesPartials() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional bool client_handles_partials = 7; + */ + public boolean getClientHandlesPartials() { + return clientHandlesPartials_; + } + private void initFields() { region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); scan_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.getDefaultInstance(); @@ -16543,6 +16720,7 @@ public final class ClientProtos { numberOfRows_ = 0; closeScanner_ = false; nextCallSeq_ = 0L; + clientHandlesPartials_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -16586,6 +16764,9 @@ public final class ClientProtos { if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeUInt64(6, nextCallSeq_); } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeBool(7, clientHandlesPartials_); + } getUnknownFields().writeTo(output); } @@ -16619,6 +16800,10 @@ public final class ClientProtos { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(6, nextCallSeq_); } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(7, clientHandlesPartials_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -16672,6 +16857,11 @@ public final class ClientProtos { result = result && (getNextCallSeq() == other.getNextCallSeq()); } + result = result && (hasClientHandlesPartials() == other.hasClientHandlesPartials()); + if (hasClientHandlesPartials()) { + result = result && (getClientHandlesPartials() + == other.getClientHandlesPartials()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -16709,6 +16899,10 @@ public final class ClientProtos { hash = (37 * hash) + NEXT_CALL_SEQ_FIELD_NUMBER; hash = (53 * hash) + hashLong(getNextCallSeq()); } + if (hasClientHandlesPartials()) { + hash = (37 * hash) + CLIENT_HANDLES_PARTIALS_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getClientHandlesPartials()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -16853,6 +17047,8 @@ public final class ClientProtos { bitField0_ = (bitField0_ & ~0x00000010); nextCallSeq_ = 0L; bitField0_ = (bitField0_ & ~0x00000020); + 
clientHandlesPartials_ = false; + bitField0_ = (bitField0_ & ~0x00000040); return this; } @@ -16913,6 +17109,10 @@ public final class ClientProtos { to_bitField0_ |= 0x00000020; } result.nextCallSeq_ = nextCallSeq_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000040; + } + result.clientHandlesPartials_ = clientHandlesPartials_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -16947,6 +17147,9 @@ public final class ClientProtos { if (other.hasNextCallSeq()) { setNextCallSeq(other.getNextCallSeq()); } + if (other.hasClientHandlesPartials()) { + setClientHandlesPartials(other.getClientHandlesPartials()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -17352,6 +17555,39 @@ public final class ClientProtos { return this; } + // optional bool client_handles_partials = 7; + private boolean clientHandlesPartials_ ; + /** + * optional bool client_handles_partials = 7; + */ + public boolean hasClientHandlesPartials() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional bool client_handles_partials = 7; + */ + public boolean getClientHandlesPartials() { + return clientHandlesPartials_; + } + /** + * optional bool client_handles_partials = 7; + */ + public Builder setClientHandlesPartials(boolean value) { + bitField0_ |= 0x00000040; + clientHandlesPartials_ = value; + onChanged(); + return this; + } + /** + * optional bool client_handles_partials = 7; + */ + public Builder clearClientHandlesPartials() { + bitField0_ = (bitField0_ & ~0x00000040); + clientHandlesPartials_ = false; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:ScanRequest) } @@ -17504,6 +17740,72 @@ public final class ClientProtos { * optional bool stale = 6; */ boolean getStale(); + + // repeated bool partial_flag_per_result = 7; + /** + * repeated bool partial_flag_per_result = 7; + * + *
      +     * This field is filled in if we are doing cellblocks. In the event that a row
      +     * could not fit all of its cells into a single RPC chunk, the results will be
      +     * returned as partials, and reconstructed into a complete result on the client
      +     * side. This field is a list of flags indicating whether or not the result
      +     * that the cells belong to is a partial result. For example, if this field
      +     * has false, false, true in it, then we know that on the client side, we need to
      +     * make another RPC request since the last result was only a partial.
+     *
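The reassembly described above happens on the client once every chunk of a row has arrived. As a minimal sketch, and assuming the cells travel embedded in the protobuf Result messages rather than in a separate cellblock, the flags could be consumed as follows; only the accessors generated in this patch are used, and the class and method names are illustrative, not part of HBase:

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Result;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;

public class PartialFlagSketch {
  // Stitch consecutive partial Results back into whole-row Results.
  // partial_flag_per_result runs parallel to the results list; a false flag
  // marks the last chunk of a row, so the buffered cells form a complete row.
  static List<Result> reassemble(ScanResponse resp) {
    List<Result> whole = new ArrayList<Result>();
    Result.Builder pending = null;
    for (int i = 0; i < resp.getResultsCount(); i++) {
      if (pending == null) {
        pending = Result.newBuilder();
      }
      pending.addAllCell(resp.getResults(i).getCellList());
      boolean partial = i < resp.getPartialFlagPerResultCount()
          && resp.getPartialFlagPerResult(i);
      if (!partial) {
        whole.add(pending.build());
        pending = null;
      }
      // A trailing true flag means the row continues in the next ScanResponse,
      // so the caller must issue another scan call before handing the row out.
    }
    return whole;
  }
}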
      + */ + java.util.List getPartialFlagPerResultList(); + /** + * repeated bool partial_flag_per_result = 7; + * + *
      +     * This field is filled in if we are doing cellblocks. In the event that a row
      +     * could not fit all of its cells into a single RPC chunk, the results will be
      +     * returned as partials, and reconstructed into a complete result on the client
      +     * side. This field is a list of flags indicating whether or not the result
      +     * that the cells belong to is a partial result. For example, if this field
      +     * has false, false, true in it, then we know that on the client side, we need to
      +     * make another RPC request since the last result was only a partial.
+     *
      + */ + int getPartialFlagPerResultCount(); + /** + * repeated bool partial_flag_per_result = 7; + * + *
      +     * This field is filled in if we are doing cellblocks. In the event that a row
      +     * could not fit all of its cells into a single RPC chunk, the results will be
      +     * returned as partials, and reconstructed into a complete result on the client
      +     * side. This field is a list of flags indicating whether or not the result
      +     * that the cells belong to is a partial result. For example, if this field
      +     * has false, false, true in it, then we know that on the client side, we need to
      +     * make another RPC request since the last result was only a partial.
+     *
      + */ + boolean getPartialFlagPerResult(int index); + + // optional bool more_results_in_region = 8; + /** + * optional bool more_results_in_region = 8; + * + *
      +     * A server may choose to limit the number of results returned to the client for
      +     * reasons such as the size in bytes or quantity of results accumulated. This field
+     * will be true when more results exist in the current region.
+     *
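Combined with the new client_handles_partials request field, this flag lets a client keep pulling from one region until it is exhausted instead of guessing. A hedged sketch of that loop, using only the request/response accessors generated in this file; the ScanCall hook stands in for the real RPC stub and is purely illustrative:

import java.io.IOException;

import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;

public class MoreResultsSketch {
  /** Hypothetical transport hook; the real RPC stub is not part of this file. */
  interface ScanCall {
    ScanResponse call(ScanRequest request) throws IOException;
  }

  // Drain one region: advertise partial-result support and keep issuing
  // scan calls while the server reports more results in the current region.
  static int drainRegion(ScanCall rpc, long scannerId) throws IOException {
    int rows = 0;
    long callSeq = 0;
    ScanResponse resp;
    do {
      ScanRequest req = ScanRequest.newBuilder()
          .setScannerId(scannerId)
          .setNumberOfRows(100)
          .setClientHandlesPartials(true)    // new field 7 in this patch
          .setNextCallSeq(callSeq++)
          .build();
      resp = rpc.call(req);
      rows += resp.getResultsCount();        // partial flags handled as sketched earlier
    } while (resp.getMoreResultsInRegion()); // new field 8 in this patch
    return rows;
  }
}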
      + */ + boolean hasMoreResultsInRegion(); + /** + * optional bool more_results_in_region = 8; + * + *
      +     * A server may choose to limit the number of results returned to the client for
      +     * reasons such as the size in bytes or quantity of results accumulated. This field
+     * will be true when more results exist in the current region.
+     *
      + */ + boolean getMoreResultsInRegion(); } /** * Protobuf type {@code ScanResponse} @@ -17611,6 +17913,32 @@ public final class ClientProtos { stale_ = input.readBool(); break; } + case 56: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + partialFlagPerResult_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + partialFlagPerResult_.add(input.readBool()); + break; + } + case 58: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040) && input.getBytesUntilLimit() > 0) { + partialFlagPerResult_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + while (input.getBytesUntilLimit() > 0) { + partialFlagPerResult_.add(input.readBool()); + } + input.popLimit(limit); + break; + } + case 64: { + bitField0_ |= 0x00000010; + moreResultsInRegion_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -17625,6 +17953,9 @@ public final class ClientProtos { if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { results_ = java.util.Collections.unmodifiableList(results_); } + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + partialFlagPerResult_ = java.util.Collections.unmodifiableList(partialFlagPerResult_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -17840,6 +18171,87 @@ public final class ClientProtos { return stale_; } + // repeated bool partial_flag_per_result = 7; + public static final int PARTIAL_FLAG_PER_RESULT_FIELD_NUMBER = 7; + private java.util.List partialFlagPerResult_; + /** + * repeated bool partial_flag_per_result = 7; + * + *
      +     * This field is filled in if we are doing cellblocks. In the event that a row
      +     * could not fit all of its cells into a single RPC chunk, the results will be
      +     * returned as partials, and reconstructed into a complete result on the client
      +     * side. This field is a list of flags indicating whether or not the result
      +     * that the cells belong to is a partial result. For example, if this field
      +     * has false, false, true in it, then we know that on the client side, we need to
      +     * make another RPC request since the last result was only a partial.
+     *
      + */ + public java.util.List + getPartialFlagPerResultList() { + return partialFlagPerResult_; + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
      +     * This field is filled in if we are doing cellblocks. In the event that a row
      +     * could not fit all of its cells into a single RPC chunk, the results will be
      +     * returned as partials, and reconstructed into a complete result on the client
      +     * side. This field is a list of flags indicating whether or not the result
      +     * that the cells belong to is a partial result. For example, if this field
      +     * has false, false, true in it, then we know that on the client side, we need to
      +     * make another RPC request since the last result was only a partial.
+     *
      + */ + public int getPartialFlagPerResultCount() { + return partialFlagPerResult_.size(); + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
      +     * This field is filled in if we are doing cellblocks. In the event that a row
      +     * could not fit all of its cells into a single RPC chunk, the results will be
      +     * returned as partials, and reconstructed into a complete result on the client
      +     * side. This field is a list of flags indicating whether or not the result
      +     * that the cells belong to is a partial result. For example, if this field
      +     * has false, false, true in it, then we know that on the client side, we need to
      +     * make another RPC request since the last result was only a partial.
+     *
      + */ + public boolean getPartialFlagPerResult(int index) { + return partialFlagPerResult_.get(index); + } + + // optional bool more_results_in_region = 8; + public static final int MORE_RESULTS_IN_REGION_FIELD_NUMBER = 8; + private boolean moreResultsInRegion_; + /** + * optional bool more_results_in_region = 8; + * + *
      +     * A server may choose to limit the number of results returned to the client for
      +     * reasons such as the size in bytes or quantity of results accumulated. This field
+     * will be true when more results exist in the current region.
+     *
      + */ + public boolean hasMoreResultsInRegion() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bool more_results_in_region = 8; + * + *
      +     * A server may choose to limit the number of results returned to the client for
      +     * reasons such as the size in bytes or quantity of results accumulated. This field
+     * will be true when more results exist in the current region.
+     *
      + */ + public boolean getMoreResultsInRegion() { + return moreResultsInRegion_; + } + private void initFields() { cellsPerResult_ = java.util.Collections.emptyList(); scannerId_ = 0L; @@ -17847,6 +18259,8 @@ public final class ClientProtos { ttl_ = 0; results_ = java.util.Collections.emptyList(); stale_ = false; + partialFlagPerResult_ = java.util.Collections.emptyList(); + moreResultsInRegion_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -17878,6 +18292,12 @@ public final class ClientProtos { if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBool(6, stale_); } + for (int i = 0; i < partialFlagPerResult_.size(); i++) { + output.writeBool(7, partialFlagPerResult_.get(i)); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBool(8, moreResultsInRegion_); + } getUnknownFields().writeTo(output); } @@ -17916,6 +18336,16 @@ public final class ClientProtos { size += com.google.protobuf.CodedOutputStream .computeBoolSize(6, stale_); } + { + int dataSize = 0; + dataSize = 1 * getPartialFlagPerResultList().size(); + size += dataSize; + size += 1 * getPartialFlagPerResultList().size(); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(8, moreResultsInRegion_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -17963,6 +18393,13 @@ public final class ClientProtos { result = result && (getStale() == other.getStale()); } + result = result && getPartialFlagPerResultList() + .equals(other.getPartialFlagPerResultList()); + result = result && (hasMoreResultsInRegion() == other.hasMoreResultsInRegion()); + if (hasMoreResultsInRegion()) { + result = result && (getMoreResultsInRegion() + == other.getMoreResultsInRegion()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -18000,6 +18437,14 @@ public final class ClientProtos { hash = (37 * hash) + STALE_FIELD_NUMBER; hash = (53 * hash) + hashBoolean(getStale()); } + if (getPartialFlagPerResultCount() > 0) { + hash = (37 * hash) + PARTIAL_FLAG_PER_RESULT_FIELD_NUMBER; + hash = (53 * hash) + getPartialFlagPerResultList().hashCode(); + } + if (hasMoreResultsInRegion()) { + hash = (37 * hash) + MORE_RESULTS_IN_REGION_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getMoreResultsInRegion()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -18132,6 +18577,10 @@ public final class ClientProtos { } stale_ = false; bitField0_ = (bitField0_ & ~0x00000020); + partialFlagPerResult_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + moreResultsInRegion_ = false; + bitField0_ = (bitField0_ & ~0x00000080); return this; } @@ -18190,6 +18639,15 @@ public final class ClientProtos { to_bitField0_ |= 0x00000008; } result.stale_ = stale_; + if (((bitField0_ & 0x00000040) == 0x00000040)) { + partialFlagPerResult_ = java.util.Collections.unmodifiableList(partialFlagPerResult_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.partialFlagPerResult_ = partialFlagPerResult_; + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000010; + } + result.moreResultsInRegion_ = moreResultsInRegion_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -18254,6 +18712,19 @@ public final class ClientProtos { if (other.hasStale()) { setStale(other.getStale()); } + if (!other.partialFlagPerResult_.isEmpty()) { + if 
(partialFlagPerResult_.isEmpty()) { + partialFlagPerResult_ = other.partialFlagPerResult_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensurePartialFlagPerResultIsMutable(); + partialFlagPerResult_.addAll(other.partialFlagPerResult_); + } + onChanged(); + } + if (other.hasMoreResultsInRegion()) { + setMoreResultsInRegion(other.getMoreResultsInRegion()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -18897,6 +19368,199 @@ public final class ClientProtos { return this; } + // repeated bool partial_flag_per_result = 7; + private java.util.List partialFlagPerResult_ = java.util.Collections.emptyList(); + private void ensurePartialFlagPerResultIsMutable() { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { + partialFlagPerResult_ = new java.util.ArrayList(partialFlagPerResult_); + bitField0_ |= 0x00000040; + } + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
      +       * This field is filled in if we are doing cellblocks. In the event that a row
      +       * could not fit all of its cells into a single RPC chunk, the results will be
      +       * returned as partials, and reconstructed into a complete result on the client
      +       * side. This field is a list of flags indicating whether or not the result
      +       * that the cells belong to is a partial result. For example, if this field
      +       * has false, false, true in it, then we know that on the client side, we need to
      +       * make another RPC request since the last result was only a partial.
+       *
      + */ + public java.util.List + getPartialFlagPerResultList() { + return java.util.Collections.unmodifiableList(partialFlagPerResult_); + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
      +       * This field is filled in if we are doing cellblocks. In the event that a row
      +       * could not fit all of its cells into a single RPC chunk, the results will be
      +       * returned as partials, and reconstructed into a complete result on the client
      +       * side. This field is a list of flags indicating whether or not the result
      +       * that the cells belong to is a partial result. For example, if this field
      +       * has false, false, true in it, then we know that on the client side, we need to
      +       * make another RPC request since the last result was only a partial.
+       *
      + */ + public int getPartialFlagPerResultCount() { + return partialFlagPerResult_.size(); + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
      +       * This field is filled in if we are doing cellblocks. In the event that a row
      +       * could not fit all of its cells into a single RPC chunk, the results will be
      +       * returned as partials, and reconstructed into a complete result on the client
      +       * side. This field is a list of flags indicating whether or not the result
      +       * that the cells belong to is a partial result. For example, if this field
      +       * has false, false, true in it, then we know that on the client side, we need to
      +       * make another RPC request since the last result was only a partial.
+       *
      + */ + public boolean getPartialFlagPerResult(int index) { + return partialFlagPerResult_.get(index); + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
      +       * This field is filled in if we are doing cellblocks. In the event that a row
      +       * could not fit all of its cells into a single RPC chunk, the results will be
      +       * returned as partials, and reconstructed into a complete result on the client
      +       * side. This field is a list of flags indicating whether or not the result
      +       * that the cells belong to is a partial result. For example, if this field
      +       * has false, false, true in it, then we know that on the client side, we need to
      +       * make another RPC request since the last result was only a partial.
+       *
      + */ + public Builder setPartialFlagPerResult( + int index, boolean value) { + ensurePartialFlagPerResultIsMutable(); + partialFlagPerResult_.set(index, value); + onChanged(); + return this; + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
      +       * This field is filled in if we are doing cellblocks. In the event that a row
      +       * could not fit all of its cells into a single RPC chunk, the results will be
      +       * returned as partials, and reconstructed into a complete result on the client
      +       * side. This field is a list of flags indicating whether or not the result
      +       * that the cells belong to is a partial result. For example, if this field
      +       * has false, false, true in it, then we know that on the client side, we need to
      +       * make another RPC request since the last result was only a partial.
+       *
      + */ + public Builder addPartialFlagPerResult(boolean value) { + ensurePartialFlagPerResultIsMutable(); + partialFlagPerResult_.add(value); + onChanged(); + return this; + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
      +       * This field is filled in if we are doing cellblocks. In the event that a row
      +       * could not fit all of its cells into a single RPC chunk, the results will be
      +       * returned as partials, and reconstructed into a complete result on the client
      +       * side. This field is a list of flags indicating whether or not the result
      +       * that the cells belong to is a partial result. For example, if this field
      +       * has false, false, true in it, then we know that on the client side, we need to
      +       * make another RPC request since the last result was only a partial.
+       *
      + */ + public Builder addAllPartialFlagPerResult( + java.lang.Iterable values) { + ensurePartialFlagPerResultIsMutable(); + super.addAll(values, partialFlagPerResult_); + onChanged(); + return this; + } + /** + * repeated bool partial_flag_per_result = 7; + * + *
      +       * This field is filled in if we are doing cellblocks. In the event that a row
      +       * could not fit all of its cells into a single RPC chunk, the results will be
      +       * returned as partials, and reconstructed into a complete result on the client
      +       * side. This field is a list of flags indicating whether or not the result
      +       * that the cells belong to is a partial result. For example, if this field
      +       * has false, false, true in it, then we know that on the client side, we need to
      +       * make another RPC request since the last result was only a partial.
+       *
      + */ + public Builder clearPartialFlagPerResult() { + partialFlagPerResult_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + return this; + } + + // optional bool more_results_in_region = 8; + private boolean moreResultsInRegion_ ; + /** + * optional bool more_results_in_region = 8; + * + *
      +       * A server may choose to limit the number of results returned to the client for
      +       * reasons such as the size in bytes or quantity of results accumulated. This field
+       * will be true when more results exist in the current region.
+       *
      + */ + public boolean hasMoreResultsInRegion() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional bool more_results_in_region = 8; + * + *
      +       * A server may choose to limit the number of results returned to the client for
      +       * reasons such as the size in bytes or quantity of results accumulated. This field
+       * will be true when more results exist in the current region.
+       *
      + */ + public boolean getMoreResultsInRegion() { + return moreResultsInRegion_; + } + /** + * optional bool more_results_in_region = 8; + * + *
      +       * A server may choose to limit the number of results returned to the client for
      +       * reasons such as the size in bytes or quantity of results accumulated. This field
+       * will be true when more results exist in the current region.
+       *
      + */ + public Builder setMoreResultsInRegion(boolean value) { + bitField0_ |= 0x00000080; + moreResultsInRegion_ = value; + onChanged(); + return this; + } + /** + * optional bool more_results_in_region = 8; + * + *
      +       * A server may choose to limit the number of results returned to the client for
      +       * reasons such as the size in bytes or quantity of results accumulated. This field
+       * will be true when more results exist in the current region.
+       *
      + */ + public Builder clearMoreResultsInRegion() { + bitField0_ = (bitField0_ & ~0x00000080); + moreResultsInRegion_ = false; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:ScanResponse) } @@ -31980,105 +32644,109 @@ public final class ClientProtos { "\024\n\014store_offset\030\t \001(\r\022\035\n\016existence_only\030" + "\n \001(\010:\005false\022!\n\022closest_row_before\030\013 \001(\010" + ":\005false\022)\n\013consistency\030\014 \001(\0162\014.Consisten" + - "cy:\006STRONG\"b\n\006Result\022\023\n\004cell\030\001 \003(\0132\005.Cel" + + "cy:\006STRONG\"z\n\006Result\022\023\n\004cell\030\001 \003(\0132\005.Cel" + "l\022\035\n\025associated_cell_count\030\002 \001(\005\022\016\n\006exis" + - "ts\030\003 \001(\010\022\024\n\005stale\030\004 \001(\010:\005false\"A\n\nGetReq" + - "uest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022\021" + - "\n\003get\030\002 \002(\0132\004.Get\"&\n\013GetResponse\022\027\n\006resu" + - "lt\030\001 \001(\0132\007.Result\"\200\001\n\tCondition\022\013\n\003row\030\001" + - " \002(\014\022\016\n\006family\030\002 \002(\014\022\021\n\tqualifier\030\003 \002(\014\022", - "\"\n\014compare_type\030\004 \002(\0162\014.CompareType\022\037\n\nc" + - "omparator\030\005 \002(\0132\013.Comparator\"\265\006\n\rMutatio" + - "nProto\022\013\n\003row\030\001 \001(\014\0220\n\013mutate_type\030\002 \001(\016" + - "2\033.MutationProto.MutationType\0220\n\014column_" + - "value\030\003 \003(\0132\032.MutationProto.ColumnValue\022" + - "\021\n\ttimestamp\030\004 \001(\004\022!\n\tattribute\030\005 \003(\0132\016." + - "NameBytesPair\022:\n\ndurability\030\006 \001(\0162\031.Muta" + - "tionProto.Durability:\013USE_DEFAULT\022\036\n\ntim" + - "e_range\030\007 \001(\0132\n.TimeRange\022\035\n\025associated_" + - "cell_count\030\010 \001(\005\022\r\n\005nonce\030\t \001(\004\032\347\001\n\013Colu", - "mnValue\022\016\n\006family\030\001 \002(\014\022B\n\017qualifier_val" + - "ue\030\002 \003(\0132).MutationProto.ColumnValue.Qua" + - "lifierValue\032\203\001\n\016QualifierValue\022\021\n\tqualif" + - "ier\030\001 \001(\014\022\r\n\005value\030\002 \001(\014\022\021\n\ttimestamp\030\003 " + - "\001(\004\022.\n\013delete_type\030\004 \001(\0162\031.MutationProto" + - ".DeleteType\022\014\n\004tags\030\005 \001(\014\"W\n\nDurability\022" + - "\017\n\013USE_DEFAULT\020\000\022\014\n\010SKIP_WAL\020\001\022\r\n\tASYNC_" + - "WAL\020\002\022\014\n\010SYNC_WAL\020\003\022\r\n\tFSYNC_WAL\020\004\">\n\014Mu" + - "tationType\022\n\n\006APPEND\020\000\022\r\n\tINCREMENT\020\001\022\007\n" + - "\003PUT\020\002\022\n\n\006DELETE\020\003\"p\n\nDeleteType\022\026\n\022DELE", - "TE_ONE_VERSION\020\000\022\034\n\030DELETE_MULTIPLE_VERS" + - "IONS\020\001\022\021\n\rDELETE_FAMILY\020\002\022\031\n\025DELETE_FAMI" + - "LY_VERSION\020\003\"\207\001\n\rMutateRequest\022 \n\006region" + - "\030\001 \002(\0132\020.RegionSpecifier\022 \n\010mutation\030\002 \002" + - "(\0132\016.MutationProto\022\035\n\tcondition\030\003 \001(\0132\n." + - "Condition\022\023\n\013nonce_group\030\004 \001(\004\"<\n\016Mutate" + - "Response\022\027\n\006result\030\001 \001(\0132\007.Result\022\021\n\tpro" + - "cessed\030\002 \001(\010\"\271\003\n\004Scan\022\027\n\006column\030\001 \003(\0132\007." 
+ - "Column\022!\n\tattribute\030\002 \003(\0132\016.NameBytesPai" + - "r\022\021\n\tstart_row\030\003 \001(\014\022\020\n\010stop_row\030\004 \001(\014\022\027", - "\n\006filter\030\005 \001(\0132\007.Filter\022\036\n\ntime_range\030\006 " + - "\001(\0132\n.TimeRange\022\027\n\014max_versions\030\007 \001(\r:\0011" + - "\022\032\n\014cache_blocks\030\010 \001(\010:\004true\022\022\n\nbatch_si" + - "ze\030\t \001(\r\022\027\n\017max_result_size\030\n \001(\004\022\023\n\013sto" + - "re_limit\030\013 \001(\r\022\024\n\014store_offset\030\014 \001(\r\022&\n\036" + - "load_column_families_on_demand\030\r \001(\010\022\r\n\005" + - "small\030\016 \001(\010\022\027\n\010reversed\030\017 \001(\010:\005false\022)\n\013" + - "consistency\030\020 \001(\0162\014.Consistency:\006STRONG\022" + - "\017\n\007caching\030\021 \001(\r\"\236\001\n\013ScanRequest\022 \n\006regi" + - "on\030\001 \001(\0132\020.RegionSpecifier\022\023\n\004scan\030\002 \001(\013", - "2\005.Scan\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of" + - "_rows\030\004 \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rne" + - "xt_call_seq\030\006 \001(\004\"\210\001\n\014ScanResponse\022\030\n\020ce" + - "lls_per_result\030\001 \003(\r\022\022\n\nscanner_id\030\002 \001(\004" + - "\022\024\n\014more_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\022\030\n\007r" + - "esults\030\005 \003(\0132\007.Result\022\r\n\005stale\030\006 \001(\010\"\263\001\n" + - "\024BulkLoadHFileRequest\022 \n\006region\030\001 \002(\0132\020." + - "RegionSpecifier\0225\n\013family_path\030\002 \003(\0132 .B" + - "ulkLoadHFileRequest.FamilyPath\022\026\n\016assign" + - "_seq_num\030\003 \001(\010\032*\n\nFamilyPath\022\016\n\006family\030\001", - " \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFileRespo" + - "nse\022\016\n\006loaded\030\001 \002(\010\"a\n\026CoprocessorServic" + - "eCall\022\013\n\003row\030\001 \002(\014\022\024\n\014service_name\030\002 \002(\t" + - "\022\023\n\013method_name\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"9" + - "\n\030CoprocessorServiceResult\022\035\n\005value\030\001 \001(" + - "\0132\016.NameBytesPair\"d\n\031CoprocessorServiceR" + - "equest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier" + - "\022%\n\004call\030\002 \002(\0132\027.CoprocessorServiceCall\"" + - "]\n\032CoprocessorServiceResponse\022 \n\006region\030" + - "\001 \002(\0132\020.RegionSpecifier\022\035\n\005value\030\002 \002(\0132\016", - ".NameBytesPair\"{\n\006Action\022\r\n\005index\030\001 \001(\r\022" + - " \n\010mutation\030\002 \001(\0132\016.MutationProto\022\021\n\003get" + - "\030\003 \001(\0132\004.Get\022-\n\014service_call\030\004 \001(\0132\027.Cop" + - "rocessorServiceCall\"Y\n\014RegionAction\022 \n\006r" + - "egion\030\001 \002(\0132\020.RegionSpecifier\022\016\n\006atomic\030" + - "\002 \001(\010\022\027\n\006action\030\003 \003(\0132\007.Action\"D\n\017Region" + - "LoadStats\022\027\n\014memstoreLoad\030\001 \001(\005:\0010\022\030\n\rhe" + - "apOccupancy\030\002 \001(\005:\0010\"\266\001\n\021ResultOrExcepti" + - "on\022\r\n\005index\030\001 \001(\r\022\027\n\006result\030\002 \001(\0132\007.Resu" + - "lt\022!\n\texception\030\003 \001(\0132\016.NameBytesPair\0221\n", - "\016service_result\030\004 \001(\0132\031.CoprocessorServi" + - "ceResult\022#\n\tloadStats\030\005 \001(\0132\020.RegionLoad" + - "Stats\"f\n\022RegionActionResult\022-\n\021resultOrE" + - "xception\030\001 \003(\0132\022.ResultOrException\022!\n\tex" + - "ception\030\002 
\001(\0132\016.NameBytesPair\"f\n\014MultiRe" + - "quest\022#\n\014regionAction\030\001 \003(\0132\r.RegionActi" + - "on\022\022\n\nnonceGroup\030\002 \001(\004\022\035\n\tcondition\030\003 \001(" + - "\0132\n.Condition\"S\n\rMultiResponse\022/\n\022region" + - "ActionResult\030\001 \003(\0132\023.RegionActionResult\022" + - "\021\n\tprocessed\030\002 \001(\010*\'\n\013Consistency\022\n\n\006STR", - "ONG\020\000\022\014\n\010TIMELINE\020\0012\205\003\n\rClientService\022 \n" + - "\003Get\022\013.GetRequest\032\014.GetResponse\022)\n\006Mutat" + - "e\022\016.MutateRequest\032\017.MutateResponse\022#\n\004Sc" + - "an\022\014.ScanRequest\032\r.ScanResponse\022>\n\rBulkL" + - "oadHFile\022\025.BulkLoadHFileRequest\032\026.BulkLo" + - "adHFileResponse\022F\n\013ExecService\022\032.Coproce" + - "ssorServiceRequest\032\033.CoprocessorServiceR" + - "esponse\022R\n\027ExecRegionServerService\022\032.Cop" + - "rocessorServiceRequest\032\033.CoprocessorServ" + - "iceResponse\022&\n\005Multi\022\r.MultiRequest\032\016.Mu", - "ltiResponseBB\n*org.apache.hadoop.hbase.p" + - "rotobuf.generatedB\014ClientProtosH\001\210\001\001\240\001\001" + "ts\030\003 \001(\010\022\024\n\005stale\030\004 \001(\010:\005false\022\026\n\007partia" + + "l\030\005 \001(\010:\005false\"A\n\nGetRequest\022 \n\006region\030\001" + + " \002(\0132\020.RegionSpecifier\022\021\n\003get\030\002 \002(\0132\004.Ge" + + "t\"&\n\013GetResponse\022\027\n\006result\030\001 \001(\0132\007.Resul" + + "t\"\200\001\n\tCondition\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002", + " \002(\014\022\021\n\tqualifier\030\003 \002(\014\022\"\n\014compare_type\030" + + "\004 \002(\0162\014.CompareType\022\037\n\ncomparator\030\005 \002(\0132" + + "\013.Comparator\"\265\006\n\rMutationProto\022\013\n\003row\030\001 " + + "\001(\014\0220\n\013mutate_type\030\002 \001(\0162\033.MutationProto" + + ".MutationType\0220\n\014column_value\030\003 \003(\0132\032.Mu" + + "tationProto.ColumnValue\022\021\n\ttimestamp\030\004 \001" + + "(\004\022!\n\tattribute\030\005 \003(\0132\016.NameBytesPair\022:\n" + + "\ndurability\030\006 \001(\0162\031.MutationProto.Durabi" + + "lity:\013USE_DEFAULT\022\036\n\ntime_range\030\007 \001(\0132\n." 
+ + "TimeRange\022\035\n\025associated_cell_count\030\010 \001(\005", + "\022\r\n\005nonce\030\t \001(\004\032\347\001\n\013ColumnValue\022\016\n\006famil" + + "y\030\001 \002(\014\022B\n\017qualifier_value\030\002 \003(\0132).Mutat" + + "ionProto.ColumnValue.QualifierValue\032\203\001\n\016" + + "QualifierValue\022\021\n\tqualifier\030\001 \001(\014\022\r\n\005val" + + "ue\030\002 \001(\014\022\021\n\ttimestamp\030\003 \001(\004\022.\n\013delete_ty" + + "pe\030\004 \001(\0162\031.MutationProto.DeleteType\022\014\n\004t" + + "ags\030\005 \001(\014\"W\n\nDurability\022\017\n\013USE_DEFAULT\020\000" + + "\022\014\n\010SKIP_WAL\020\001\022\r\n\tASYNC_WAL\020\002\022\014\n\010SYNC_WA" + + "L\020\003\022\r\n\tFSYNC_WAL\020\004\">\n\014MutationType\022\n\n\006AP" + + "PEND\020\000\022\r\n\tINCREMENT\020\001\022\007\n\003PUT\020\002\022\n\n\006DELETE", + "\020\003\"p\n\nDeleteType\022\026\n\022DELETE_ONE_VERSION\020\000" + + "\022\034\n\030DELETE_MULTIPLE_VERSIONS\020\001\022\021\n\rDELETE" + + "_FAMILY\020\002\022\031\n\025DELETE_FAMILY_VERSION\020\003\"\207\001\n" + + "\rMutateRequest\022 \n\006region\030\001 \002(\0132\020.RegionS" + + "pecifier\022 \n\010mutation\030\002 \002(\0132\016.MutationPro" + + "to\022\035\n\tcondition\030\003 \001(\0132\n.Condition\022\023\n\013non" + + "ce_group\030\004 \001(\004\"<\n\016MutateResponse\022\027\n\006resu" + + "lt\030\001 \001(\0132\007.Result\022\021\n\tprocessed\030\002 \001(\010\"\271\003\n" + + "\004Scan\022\027\n\006column\030\001 \003(\0132\007.Column\022!\n\tattrib" + + "ute\030\002 \003(\0132\016.NameBytesPair\022\021\n\tstart_row\030\003", + " \001(\014\022\020\n\010stop_row\030\004 \001(\014\022\027\n\006filter\030\005 \001(\0132\007" + + ".Filter\022\036\n\ntime_range\030\006 \001(\0132\n.TimeRange\022" + + "\027\n\014max_versions\030\007 \001(\r:\0011\022\032\n\014cache_blocks" + + "\030\010 \001(\010:\004true\022\022\n\nbatch_size\030\t \001(\r\022\027\n\017max_" + + "result_size\030\n \001(\004\022\023\n\013store_limit\030\013 \001(\r\022\024" + + "\n\014store_offset\030\014 \001(\r\022&\n\036load_column_fami" + + "lies_on_demand\030\r \001(\010\022\r\n\005small\030\016 \001(\010\022\027\n\010r" + + "eversed\030\017 \001(\010:\005false\022)\n\013consistency\030\020 \001(" + + "\0162\014.Consistency:\006STRONG\022\017\n\007caching\030\021 \001(\r" + + "\"\277\001\n\013ScanRequest\022 \n\006region\030\001 \001(\0132\020.Regio", + "nSpecifier\022\023\n\004scan\030\002 \001(\0132\005.Scan\022\022\n\nscann" + + "er_id\030\003 \001(\004\022\026\n\016number_of_rows\030\004 \001(\r\022\025\n\rc" + + "lose_scanner\030\005 \001(\010\022\025\n\rnext_call_seq\030\006 \001(" + + "\004\022\037\n\027client_handles_partials\030\007 \001(\010\"\311\001\n\014S" + + "canResponse\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n" + + "\nscanner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022" + + "\013\n\003ttl\030\004 \001(\r\022\030\n\007results\030\005 \003(\0132\007.Result\022\r" + + "\n\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_result" + + "\030\007 \003(\010\022\036\n\026more_results_in_region\030\010 \001(\010\"\263" + + "\001\n\024BulkLoadHFileRequest\022 \n\006region\030\001 \002(\0132", + "\020.RegionSpecifier\0225\n\013family_path\030\002 \003(\0132 " + + ".BulkLoadHFileRequest.FamilyPath\022\026\n\016assi" + + "gn_seq_num\030\003 \001(\010\032*\n\nFamilyPath\022\016\n\006family" + + "\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFileRes" + + "ponse\022\016\n\006loaded\030\001 
\002(\010\"a\n\026CoprocessorServ" + + "iceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014service_name\030\002 \002" + + "(\t\022\023\n\013method_name\030\003 \002(\t\022\017\n\007request\030\004 \002(\014" + + "\"9\n\030CoprocessorServiceResult\022\035\n\005value\030\001 " + + "\001(\0132\016.NameBytesPair\"d\n\031CoprocessorServic" + + "eRequest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifi", + "er\022%\n\004call\030\002 \002(\0132\027.CoprocessorServiceCal" + + "l\"]\n\032CoprocessorServiceResponse\022 \n\006regio" + + "n\030\001 \002(\0132\020.RegionSpecifier\022\035\n\005value\030\002 \002(\013" + + "2\016.NameBytesPair\"{\n\006Action\022\r\n\005index\030\001 \001(" + + "\r\022 \n\010mutation\030\002 \001(\0132\016.MutationProto\022\021\n\003g" + + "et\030\003 \001(\0132\004.Get\022-\n\014service_call\030\004 \001(\0132\027.C" + + "oprocessorServiceCall\"Y\n\014RegionAction\022 \n" + + "\006region\030\001 \002(\0132\020.RegionSpecifier\022\016\n\006atomi" + + "c\030\002 \001(\010\022\027\n\006action\030\003 \003(\0132\007.Action\"D\n\017Regi" + + "onLoadStats\022\027\n\014memstoreLoad\030\001 \001(\005:\0010\022\030\n\r", + "heapOccupancy\030\002 \001(\005:\0010\"\266\001\n\021ResultOrExcep" + + "tion\022\r\n\005index\030\001 \001(\r\022\027\n\006result\030\002 \001(\0132\007.Re" + + "sult\022!\n\texception\030\003 \001(\0132\016.NameBytesPair\022" + + "1\n\016service_result\030\004 \001(\0132\031.CoprocessorSer" + + "viceResult\022#\n\tloadStats\030\005 \001(\0132\020.RegionLo" + + "adStats\"f\n\022RegionActionResult\022-\n\021resultO" + + "rException\030\001 \003(\0132\022.ResultOrException\022!\n\t" + + "exception\030\002 \001(\0132\016.NameBytesPair\"f\n\014Multi" + + "Request\022#\n\014regionAction\030\001 \003(\0132\r.RegionAc" + + "tion\022\022\n\nnonceGroup\030\002 \001(\004\022\035\n\tcondition\030\003 ", + "\001(\0132\n.Condition\"S\n\rMultiResponse\022/\n\022regi" + + "onActionResult\030\001 \003(\0132\023.RegionActionResul" + + "t\022\021\n\tprocessed\030\002 \001(\010*\'\n\013Consistency\022\n\n\006S" + + "TRONG\020\000\022\014\n\010TIMELINE\020\0012\205\003\n\rClientService\022" + + " \n\003Get\022\013.GetRequest\032\014.GetResponse\022)\n\006Mut" + + "ate\022\016.MutateRequest\032\017.MutateResponse\022#\n\004" + + "Scan\022\014.ScanRequest\032\r.ScanResponse\022>\n\rBul" + + "kLoadHFile\022\025.BulkLoadHFileRequest\032\026.Bulk" + + "LoadHFileResponse\022F\n\013ExecService\022\032.Copro" + + "cessorServiceRequest\032\033.CoprocessorServic", + "eResponse\022R\n\027ExecRegionServerService\022\032.C" + + "oprocessorServiceRequest\032\033.CoprocessorSe" + + "rviceResponse\022&\n\005Multi\022\r.MultiRequest\032\016." 
+ + "MultiResponseBB\n*org.apache.hadoop.hbase" + + ".protobuf.generatedB\014ClientProtosH\001\210\001\001\240\001" + + "\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -32114,7 +32782,7 @@ public final class ClientProtos { internal_static_Result_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Result_descriptor, - new java.lang.String[] { "Cell", "AssociatedCellCount", "Exists", "Stale", }); + new java.lang.String[] { "Cell", "AssociatedCellCount", "Exists", "Stale", "Partial", }); internal_static_GetRequest_descriptor = getDescriptor().getMessageTypes().get(5); internal_static_GetRequest_fieldAccessorTable = new @@ -32174,13 +32842,13 @@ public final class ClientProtos { internal_static_ScanRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ScanRequest_descriptor, - new java.lang.String[] { "Region", "Scan", "ScannerId", "NumberOfRows", "CloseScanner", "NextCallSeq", }); + new java.lang.String[] { "Region", "Scan", "ScannerId", "NumberOfRows", "CloseScanner", "NextCallSeq", "ClientHandlesPartials", }); internal_static_ScanResponse_descriptor = getDescriptor().getMessageTypes().get(13); internal_static_ScanResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ScanResponse_descriptor, - new java.lang.String[] { "CellsPerResult", "ScannerId", "MoreResults", "Ttl", "Results", "Stale", }); + new java.lang.String[] { "CellsPerResult", "ScannerId", "MoreResults", "Ttl", "Results", "Stale", "PartialFlagPerResult", "MoreResultsInRegion", }); internal_static_BulkLoadHFileRequest_descriptor = getDescriptor().getMessageTypes().get(14); internal_static_BulkLoadHFileRequest_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java index 6dc48fa12db..5fc4f437719 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java @@ -1865,6 +1865,1393 @@ public final class ClusterStatusProtos { // @@protoc_insertion_point(class_scope:RegionInTransition) } + public interface StoreSequenceIdOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bytes family_name = 1; + /** + * required bytes family_name = 1; + */ + boolean hasFamilyName(); + /** + * required bytes family_name = 1; + */ + com.google.protobuf.ByteString getFamilyName(); + + // required uint64 sequence_id = 2; + /** + * required uint64 sequence_id = 2; + */ + boolean hasSequenceId(); + /** + * required uint64 sequence_id = 2; + */ + long getSequenceId(); + } + /** + * Protobuf type {@code StoreSequenceId} + * + *
      +   **
      +   * sequence Id of a store
+   *
      + */ + public static final class StoreSequenceId extends + com.google.protobuf.GeneratedMessage + implements StoreSequenceIdOrBuilder { + // Use StoreSequenceId.newBuilder() to construct. + private StoreSequenceId(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private StoreSequenceId(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final StoreSequenceId defaultInstance; + public static StoreSequenceId getDefaultInstance() { + return defaultInstance; + } + + public StoreSequenceId getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private StoreSequenceId( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + familyName_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + sequenceId_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_StoreSequenceId_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_StoreSequenceId_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public StoreSequenceId parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StoreSequenceId(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bytes family_name = 1; + public static final int FAMILY_NAME_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString familyName_; + /** + * required bytes family_name = 1; + */ + public boolean hasFamilyName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes family_name = 1; + */ + public 
com.google.protobuf.ByteString getFamilyName() { + return familyName_; + } + + // required uint64 sequence_id = 2; + public static final int SEQUENCE_ID_FIELD_NUMBER = 2; + private long sequenceId_; + /** + * required uint64 sequence_id = 2; + */ + public boolean hasSequenceId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 sequence_id = 2; + */ + public long getSequenceId() { + return sequenceId_; + } + + private void initFields() { + familyName_ = com.google.protobuf.ByteString.EMPTY; + sequenceId_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasFamilyName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSequenceId()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, familyName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, sequenceId_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, familyName_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, sequenceId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId) obj; + + boolean result = true; + result = result && (hasFamilyName() == other.hasFamilyName()); + if (hasFamilyName()) { + result = result && getFamilyName() + .equals(other.getFamilyName()); + } + result = result && (hasSequenceId() == other.hasSequenceId()); + if (hasSequenceId()) { + result = result && (getSequenceId() + == other.getSequenceId()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasFamilyName()) { + hash = (37 * hash) + FAMILY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getFamilyName().hashCode(); + } + if (hasSequenceId()) { + hash = (37 * hash) + SEQUENCE_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getSequenceId()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code StoreSequenceId} + * + *
      +     **
      +     * sequence Id of a store
+     *
      + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_StoreSequenceId_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_StoreSequenceId_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + familyName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + sequenceId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_StoreSequenceId_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId build() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.familyName_ = familyName_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.sequenceId_ = sequenceId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId other) { 
+ if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance()) return this; + if (other.hasFamilyName()) { + setFamilyName(other.getFamilyName()); + } + if (other.hasSequenceId()) { + setSequenceId(other.getSequenceId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasFamilyName()) { + + return false; + } + if (!hasSequenceId()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bytes family_name = 1; + private com.google.protobuf.ByteString familyName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes family_name = 1; + */ + public boolean hasFamilyName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bytes family_name = 1; + */ + public com.google.protobuf.ByteString getFamilyName() { + return familyName_; + } + /** + * required bytes family_name = 1; + */ + public Builder setFamilyName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + familyName_ = value; + onChanged(); + return this; + } + /** + * required bytes family_name = 1; + */ + public Builder clearFamilyName() { + bitField0_ = (bitField0_ & ~0x00000001); + familyName_ = getDefaultInstance().getFamilyName(); + onChanged(); + return this; + } + + // required uint64 sequence_id = 2; + private long sequenceId_ ; + /** + * required uint64 sequence_id = 2; + */ + public boolean hasSequenceId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 sequence_id = 2; + */ + public long getSequenceId() { + return sequenceId_; + } + /** + * required uint64 sequence_id = 2; + */ + public Builder setSequenceId(long value) { + bitField0_ |= 0x00000002; + sequenceId_ = value; + onChanged(); + return this; + } + /** + * required uint64 sequence_id = 2; + */ + public Builder clearSequenceId() { + bitField0_ = (bitField0_ & ~0x00000002); + sequenceId_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:StoreSequenceId) + } + + static { + defaultInstance = new StoreSequenceId(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:StoreSequenceId) + } + + public interface RegionStoreSequenceIdsOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 last_flushed_sequence_id = 1; + /** + * required uint64 last_flushed_sequence_id = 1; + */ + boolean hasLastFlushedSequenceId(); + /** + * required uint64 last_flushed_sequence_id = 1; + */ + long getLastFlushedSequenceId(); + + // repeated .StoreSequenceId store_sequence_id = 2; + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + java.util.List + getStoreSequenceIdList(); + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreSequenceId(int index); + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + int getStoreSequenceIdCount(); + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + java.util.List + getStoreSequenceIdOrBuilderList(); + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder( + int index); + } + /** + * Protobuf type {@code RegionStoreSequenceIds} + * + *
      +   **
      +   * contains a sequence id of a region which should be the minimum of its store sequence ids and
      +   * list of sequence ids of the region's stores
+   * </pre>
      + */ + public static final class RegionStoreSequenceIds extends + com.google.protobuf.GeneratedMessage + implements RegionStoreSequenceIdsOrBuilder { + // Use RegionStoreSequenceIds.newBuilder() to construct. + private RegionStoreSequenceIds(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RegionStoreSequenceIds(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RegionStoreSequenceIds defaultInstance; + public static RegionStoreSequenceIds getDefaultInstance() { + return defaultInstance; + } + + public RegionStoreSequenceIds getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RegionStoreSequenceIds( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + lastFlushedSequenceId_ = input.readUInt64(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + storeSequenceId_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + storeSequenceId_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + storeSequenceId_ = java.util.Collections.unmodifiableList(storeSequenceId_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionStoreSequenceIds_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionStoreSequenceIds_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RegionStoreSequenceIds parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RegionStoreSequenceIds(input, 
extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required uint64 last_flushed_sequence_id = 1; + public static final int LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER = 1; + private long lastFlushedSequenceId_; + /** + * required uint64 last_flushed_sequence_id = 1; + */ + public boolean hasLastFlushedSequenceId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 last_flushed_sequence_id = 1; + */ + public long getLastFlushedSequenceId() { + return lastFlushedSequenceId_; + } + + // repeated .StoreSequenceId store_sequence_id = 2; + public static final int STORE_SEQUENCE_ID_FIELD_NUMBER = 2; + private java.util.List storeSequenceId_; + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public java.util.List getStoreSequenceIdList() { + return storeSequenceId_; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public java.util.List + getStoreSequenceIdOrBuilderList() { + return storeSequenceId_; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public int getStoreSequenceIdCount() { + return storeSequenceId_.size(); + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreSequenceId(int index) { + return storeSequenceId_.get(index); + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder( + int index) { + return storeSequenceId_.get(index); + } + + private void initFields() { + lastFlushedSequenceId_ = 0L; + storeSequenceId_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasLastFlushedSequenceId()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getStoreSequenceIdCount(); i++) { + if (!getStoreSequenceId(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, lastFlushedSequenceId_); + } + for (int i = 0; i < storeSequenceId_.size(); i++) { + output.writeMessage(2, storeSequenceId_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, lastFlushedSequenceId_); + } + for (int i = 0; i < storeSequenceId_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, storeSequenceId_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + 
return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds) obj; + + boolean result = true; + result = result && (hasLastFlushedSequenceId() == other.hasLastFlushedSequenceId()); + if (hasLastFlushedSequenceId()) { + result = result && (getLastFlushedSequenceId() + == other.getLastFlushedSequenceId()); + } + result = result && getStoreSequenceIdList() + .equals(other.getStoreSequenceIdList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasLastFlushedSequenceId()) { + hash = (37 * hash) + LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getLastFlushedSequenceId()); + } + if (getStoreSequenceIdCount() > 0) { + hash = (37 * hash) + STORE_SEQUENCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getStoreSequenceIdList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code RegionStoreSequenceIds} + * + *
      +     **
      +     * contains a sequence id of a region which should be the minimum of its store sequence ids and
      +     * list of sequence ids of the region's stores
+     * </pre>
      + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIdsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionStoreSequenceIds_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionStoreSequenceIds_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getStoreSequenceIdFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + lastFlushedSequenceId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + if (storeSequenceIdBuilder_ == null) { + storeSequenceId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + storeSequenceIdBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionStoreSequenceIds_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds build() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.lastFlushedSequenceId_ = lastFlushedSequenceId_; + if (storeSequenceIdBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + storeSequenceId_ = java.util.Collections.unmodifiableList(storeSequenceId_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.storeSequenceId_ = storeSequenceId_; + } else { + result.storeSequenceId_ = storeSequenceIdBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + 
return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.getDefaultInstance()) return this; + if (other.hasLastFlushedSequenceId()) { + setLastFlushedSequenceId(other.getLastFlushedSequenceId()); + } + if (storeSequenceIdBuilder_ == null) { + if (!other.storeSequenceId_.isEmpty()) { + if (storeSequenceId_.isEmpty()) { + storeSequenceId_ = other.storeSequenceId_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.addAll(other.storeSequenceId_); + } + onChanged(); + } + } else { + if (!other.storeSequenceId_.isEmpty()) { + if (storeSequenceIdBuilder_.isEmpty()) { + storeSequenceIdBuilder_.dispose(); + storeSequenceIdBuilder_ = null; + storeSequenceId_ = other.storeSequenceId_; + bitField0_ = (bitField0_ & ~0x00000002); + storeSequenceIdBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getStoreSequenceIdFieldBuilder() : null; + } else { + storeSequenceIdBuilder_.addAllMessages(other.storeSequenceId_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasLastFlushedSequenceId()) { + + return false; + } + for (int i = 0; i < getStoreSequenceIdCount(); i++) { + if (!getStoreSequenceId(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required uint64 last_flushed_sequence_id = 1; + private long lastFlushedSequenceId_ ; + /** + * required uint64 last_flushed_sequence_id = 1; + */ + public boolean hasLastFlushedSequenceId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 last_flushed_sequence_id = 1; + */ + public long getLastFlushedSequenceId() { + return lastFlushedSequenceId_; + } + /** + * required uint64 last_flushed_sequence_id = 1; + */ + public Builder setLastFlushedSequenceId(long value) { + bitField0_ |= 0x00000001; + lastFlushedSequenceId_ = value; + onChanged(); + return this; + } + /** + * required uint64 last_flushed_sequence_id = 1; + */ + public Builder clearLastFlushedSequenceId() { + bitField0_ = (bitField0_ & ~0x00000001); + lastFlushedSequenceId_ = 0L; + onChanged(); + return this; + } + + // repeated .StoreSequenceId store_sequence_id = 2; + private java.util.List storeSequenceId_ = + java.util.Collections.emptyList(); + private void 
ensureStoreSequenceIdIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + storeSequenceId_ = new java.util.ArrayList(storeSequenceId_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> storeSequenceIdBuilder_; + + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public java.util.List getStoreSequenceIdList() { + if (storeSequenceIdBuilder_ == null) { + return java.util.Collections.unmodifiableList(storeSequenceId_); + } else { + return storeSequenceIdBuilder_.getMessageList(); + } + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public int getStoreSequenceIdCount() { + if (storeSequenceIdBuilder_ == null) { + return storeSequenceId_.size(); + } else { + return storeSequenceIdBuilder_.getCount(); + } + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreSequenceId(int index) { + if (storeSequenceIdBuilder_ == null) { + return storeSequenceId_.get(index); + } else { + return storeSequenceIdBuilder_.getMessage(index); + } + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder setStoreSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.set(index, value); + onChanged(); + } else { + storeSequenceIdBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder setStoreSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeSequenceIdBuilder_ == null) { + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.set(index, builderForValue.build()); + onChanged(); + } else { + storeSequenceIdBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder addStoreSequenceId(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.add(value); + onChanged(); + } else { + storeSequenceIdBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder addStoreSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.add(index, value); + onChanged(); + } else { + storeSequenceIdBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder addStoreSequenceId( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeSequenceIdBuilder_ == null) { + 
ensureStoreSequenceIdIsMutable(); + storeSequenceId_.add(builderForValue.build()); + onChanged(); + } else { + storeSequenceIdBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder addStoreSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeSequenceIdBuilder_ == null) { + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.add(index, builderForValue.build()); + onChanged(); + } else { + storeSequenceIdBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder addAllStoreSequenceId( + java.lang.Iterable values) { + if (storeSequenceIdBuilder_ == null) { + ensureStoreSequenceIdIsMutable(); + super.addAll(values, storeSequenceId_); + onChanged(); + } else { + storeSequenceIdBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder clearStoreSequenceId() { + if (storeSequenceIdBuilder_ == null) { + storeSequenceId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + storeSequenceIdBuilder_.clear(); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public Builder removeStoreSequenceId(int index) { + if (storeSequenceIdBuilder_ == null) { + ensureStoreSequenceIdIsMutable(); + storeSequenceId_.remove(index); + onChanged(); + } else { + storeSequenceIdBuilder_.remove(index); + } + return this; + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder getStoreSequenceIdBuilder( + int index) { + return getStoreSequenceIdFieldBuilder().getBuilder(index); + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder( + int index) { + if (storeSequenceIdBuilder_ == null) { + return storeSequenceId_.get(index); } else { + return storeSequenceIdBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public java.util.List + getStoreSequenceIdOrBuilderList() { + if (storeSequenceIdBuilder_ != null) { + return storeSequenceIdBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(storeSequenceId_); + } + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreSequenceIdBuilder() { + return getStoreSequenceIdFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance()); + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreSequenceIdBuilder( + int index) { + return getStoreSequenceIdFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance()); + } + /** + * repeated .StoreSequenceId store_sequence_id = 2; + */ + public java.util.List + getStoreSequenceIdBuilderList() { + return getStoreSequenceIdFieldBuilder().getBuilderList(); + } + private 
com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> + getStoreSequenceIdFieldBuilder() { + if (storeSequenceIdBuilder_ == null) { + storeSequenceIdBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>( + storeSequenceId_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + storeSequenceId_ = null; + } + return storeSequenceIdBuilder_; + } + + // @@protoc_insertion_point(builder_scope:RegionStoreSequenceIds) + } + + static { + defaultInstance = new RegionStoreSequenceIds(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:RegionStoreSequenceIds) + } + public interface RegionLoadOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -2181,6 +3568,51 @@ public final class ClusterStatusProtos { * optional uint64 last_major_compaction_ts = 17 [default = 0]; */ long getLastMajorCompactionTs(); + + // repeated .StoreSequenceId store_complete_sequence_id = 18; + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +     ** the most recent sequence Id of store from cache flush 
+     * </pre>
      + */ + java.util.List + getStoreCompleteSequenceIdList(); + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +     ** the most recent sequence Id of store from cache flush 
+     * </pre>
      + */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreCompleteSequenceId(int index); + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +     ** the most recent sequence Id of store from cache flush 
+     * </pre>
      + */ + int getStoreCompleteSequenceIdCount(); + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +     ** the most recent sequence Id of store from cache flush 
+     * </pre>
      + */ + java.util.List + getStoreCompleteSequenceIdOrBuilderList(); + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +     ** the most recent sequence Id of store from cache flush 
+     * </pre>
      + */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreCompleteSequenceIdOrBuilder( + int index); } /** * Protobuf type {@code RegionLoad} @@ -2326,6 +3758,14 @@ public final class ClusterStatusProtos { lastMajorCompactionTs_ = input.readUInt64(); break; } + case 146: { + if (!((mutable_bitField0_ & 0x00020000) == 0x00020000)) { + storeCompleteSequenceId_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00020000; + } + storeCompleteSequenceId_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.PARSER, extensionRegistry)); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -2334,6 +3774,9 @@ public final class ClusterStatusProtos { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00020000) == 0x00020000)) { + storeCompleteSequenceId_ = java.util.Collections.unmodifiableList(storeCompleteSequenceId_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -2784,6 +4227,62 @@ public final class ClusterStatusProtos { return lastMajorCompactionTs_; } + // repeated .StoreSequenceId store_complete_sequence_id = 18; + public static final int STORE_COMPLETE_SEQUENCE_ID_FIELD_NUMBER = 18; + private java.util.List storeCompleteSequenceId_; + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +     ** the most recent sequence Id of store from cache flush 
+     * </pre>
      + */ + public java.util.List getStoreCompleteSequenceIdList() { + return storeCompleteSequenceId_; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +     ** the most recent sequence Id of store from cache flush 
+     * </pre>
      + */ + public java.util.List + getStoreCompleteSequenceIdOrBuilderList() { + return storeCompleteSequenceId_; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +     ** the most recent sequence Id of store from cache flush 
+     * </pre>
      + */ + public int getStoreCompleteSequenceIdCount() { + return storeCompleteSequenceId_.size(); + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +     ** the most recent sequence Id of store from cache flush 
+     * </pre>
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreCompleteSequenceId(int index) { + return storeCompleteSequenceId_.get(index); + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +     ** the most recent sequence Id of store from cache flush 
+     * </pre>
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreCompleteSequenceIdOrBuilder( + int index) { + return storeCompleteSequenceId_.get(index); + } + private void initFields() { regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance(); stores_ = 0; @@ -2802,6 +4301,7 @@ public final class ClusterStatusProtos { completeSequenceId_ = 0L; dataLocality_ = 0F; lastMajorCompactionTs_ = 0L; + storeCompleteSequenceId_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -2816,6 +4316,12 @@ public final class ClusterStatusProtos { memoizedIsInitialized = 0; return false; } + for (int i = 0; i < getStoreCompleteSequenceIdCount(); i++) { + if (!getStoreCompleteSequenceId(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -2874,6 +4380,9 @@ public final class ClusterStatusProtos { if (((bitField0_ & 0x00010000) == 0x00010000)) { output.writeUInt64(17, lastMajorCompactionTs_); } + for (int i = 0; i < storeCompleteSequenceId_.size(); i++) { + output.writeMessage(18, storeCompleteSequenceId_.get(i)); + } getUnknownFields().writeTo(output); } @@ -2951,6 +4460,10 @@ public final class ClusterStatusProtos { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(17, lastMajorCompactionTs_); } + for (int i = 0; i < storeCompleteSequenceId_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(18, storeCompleteSequenceId_.get(i)); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -3058,6 +4571,8 @@ public final class ClusterStatusProtos { result = result && (getLastMajorCompactionTs() == other.getLastMajorCompactionTs()); } + result = result && getStoreCompleteSequenceIdList() + .equals(other.getStoreCompleteSequenceIdList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -3140,6 +4655,10 @@ public final class ClusterStatusProtos { hash = (37 * hash) + LAST_MAJOR_COMPACTION_TS_FIELD_NUMBER; hash = (53 * hash) + hashLong(getLastMajorCompactionTs()); } + if (getStoreCompleteSequenceIdCount() > 0) { + hash = (37 * hash) + STORE_COMPLETE_SEQUENCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getStoreCompleteSequenceIdList().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -3242,6 +4761,7 @@ public final class ClusterStatusProtos { private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getRegionSpecifierFieldBuilder(); + getStoreCompleteSequenceIdFieldBuilder(); } } private static Builder create() { @@ -3288,6 +4808,12 @@ public final class ClusterStatusProtos { bitField0_ = (bitField0_ & ~0x00008000); lastMajorCompactionTs_ = 0L; bitField0_ = (bitField0_ & ~0x00010000); + if (storeCompleteSequenceIdBuilder_ == null) { + storeCompleteSequenceId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00020000); + } else { + storeCompleteSequenceIdBuilder_.clear(); + } return this; } @@ -3388,6 +4914,15 @@ public final class ClusterStatusProtos { to_bitField0_ |= 0x00010000; } result.lastMajorCompactionTs_ = lastMajorCompactionTs_; + if (storeCompleteSequenceIdBuilder_ == null) { + if (((bitField0_ & 0x00020000) == 0x00020000)) { + storeCompleteSequenceId_ = 
java.util.Collections.unmodifiableList(storeCompleteSequenceId_); + bitField0_ = (bitField0_ & ~0x00020000); + } + result.storeCompleteSequenceId_ = storeCompleteSequenceId_; + } else { + result.storeCompleteSequenceId_ = storeCompleteSequenceIdBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -3455,6 +4990,32 @@ public final class ClusterStatusProtos { if (other.hasLastMajorCompactionTs()) { setLastMajorCompactionTs(other.getLastMajorCompactionTs()); } + if (storeCompleteSequenceIdBuilder_ == null) { + if (!other.storeCompleteSequenceId_.isEmpty()) { + if (storeCompleteSequenceId_.isEmpty()) { + storeCompleteSequenceId_ = other.storeCompleteSequenceId_; + bitField0_ = (bitField0_ & ~0x00020000); + } else { + ensureStoreCompleteSequenceIdIsMutable(); + storeCompleteSequenceId_.addAll(other.storeCompleteSequenceId_); + } + onChanged(); + } + } else { + if (!other.storeCompleteSequenceId_.isEmpty()) { + if (storeCompleteSequenceIdBuilder_.isEmpty()) { + storeCompleteSequenceIdBuilder_.dispose(); + storeCompleteSequenceIdBuilder_ = null; + storeCompleteSequenceId_ = other.storeCompleteSequenceId_; + bitField0_ = (bitField0_ & ~0x00020000); + storeCompleteSequenceIdBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getStoreCompleteSequenceIdFieldBuilder() : null; + } else { + storeCompleteSequenceIdBuilder_.addAllMessages(other.storeCompleteSequenceId_); + } + } + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -3468,6 +5029,12 @@ public final class ClusterStatusProtos { return false; } + for (int i = 0; i < getStoreCompleteSequenceIdCount(); i++) { + if (!getStoreCompleteSequenceId(i).isInitialized()) { + + return false; + } + } return true; } @@ -4427,6 +5994,318 @@ public final class ClusterStatusProtos { return this; } + // repeated .StoreSequenceId store_complete_sequence_id = 18; + private java.util.List storeCompleteSequenceId_ = + java.util.Collections.emptyList(); + private void ensureStoreCompleteSequenceIdIsMutable() { + if (!((bitField0_ & 0x00020000) == 0x00020000)) { + storeCompleteSequenceId_ = new java.util.ArrayList(storeCompleteSequenceId_); + bitField0_ |= 0x00020000; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> storeCompleteSequenceIdBuilder_; + + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public java.util.List getStoreCompleteSequenceIdList() { + if (storeCompleteSequenceIdBuilder_ == null) { + return java.util.Collections.unmodifiableList(storeCompleteSequenceId_); + } else { + return storeCompleteSequenceIdBuilder_.getMessageList(); + } + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public int getStoreCompleteSequenceIdCount() { + if (storeCompleteSequenceIdBuilder_ == null) { + return storeCompleteSequenceId_.size(); + } else { + return storeCompleteSequenceIdBuilder_.getCount(); + } + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreCompleteSequenceId(int index) { + if (storeCompleteSequenceIdBuilder_ == null) { + return storeCompleteSequenceId_.get(index); + } else { + return storeCompleteSequenceIdBuilder_.getMessage(index); + } + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public Builder setStoreCompleteSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeCompleteSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreCompleteSequenceIdIsMutable(); + storeCompleteSequenceId_.set(index, value); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public Builder setStoreCompleteSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeCompleteSequenceIdBuilder_ == null) { + ensureStoreCompleteSequenceIdIsMutable(); + storeCompleteSequenceId_.set(index, builderForValue.build()); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public Builder addStoreCompleteSequenceId(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeCompleteSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreCompleteSequenceIdIsMutable(); + storeCompleteSequenceId_.add(value); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public Builder addStoreCompleteSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeCompleteSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreCompleteSequenceIdIsMutable(); + storeCompleteSequenceId_.add(index, value); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public Builder addStoreCompleteSequenceId( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeCompleteSequenceIdBuilder_ == null) { + ensureStoreCompleteSequenceIdIsMutable(); + storeCompleteSequenceId_.add(builderForValue.build()); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public Builder addStoreCompleteSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeCompleteSequenceIdBuilder_ == null) { + ensureStoreCompleteSequenceIdIsMutable(); + storeCompleteSequenceId_.add(index, builderForValue.build()); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public Builder addAllStoreCompleteSequenceId( + java.lang.Iterable values) { + if (storeCompleteSequenceIdBuilder_ == null) { + ensureStoreCompleteSequenceIdIsMutable(); + super.addAll(values, storeCompleteSequenceId_); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public Builder clearStoreCompleteSequenceId() { + if (storeCompleteSequenceIdBuilder_ == null) { + storeCompleteSequenceId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00020000); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.clear(); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public Builder removeStoreCompleteSequenceId(int index) { + if (storeCompleteSequenceIdBuilder_ == null) { + ensureStoreCompleteSequenceIdIsMutable(); + storeCompleteSequenceId_.remove(index); + onChanged(); + } else { + storeCompleteSequenceIdBuilder_.remove(index); + } + return this; + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder getStoreCompleteSequenceIdBuilder( + int index) { + return getStoreCompleteSequenceIdFieldBuilder().getBuilder(index); + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreCompleteSequenceIdOrBuilder( + int index) { + if (storeCompleteSequenceIdBuilder_ == null) { + return storeCompleteSequenceId_.get(index); } else { + return storeCompleteSequenceIdBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public java.util.List + getStoreCompleteSequenceIdOrBuilderList() { + if (storeCompleteSequenceIdBuilder_ != null) { + return storeCompleteSequenceIdBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(storeCompleteSequenceId_); + } + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreCompleteSequenceIdBuilder() { + return getStoreCompleteSequenceIdFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance()); + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreCompleteSequenceIdBuilder( + int index) { + return getStoreCompleteSequenceIdFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance()); + } + /** + * repeated .StoreSequenceId store_complete_sequence_id = 18; + * + *
      +       ** the most recent sequence Id of store from cache flush 
+       * </pre>
      + */ + public java.util.List + getStoreCompleteSequenceIdBuilderList() { + return getStoreCompleteSequenceIdFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> + getStoreCompleteSequenceIdFieldBuilder() { + if (storeCompleteSequenceIdBuilder_ == null) { + storeCompleteSequenceIdBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>( + storeCompleteSequenceId_, + ((bitField0_ & 0x00020000) == 0x00020000), + getParentForChildren(), + isClean()); + storeCompleteSequenceId_ = null; + } + return storeCompleteSequenceIdBuilder_; + } + // @@protoc_insertion_point(builder_scope:RegionLoad) } @@ -4438,6 +6317,1455 @@ public final class ClusterStatusProtos { // @@protoc_insertion_point(class_scope:RegionLoad) } + public interface ReplicationLoadSinkOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 ageOfLastAppliedOp = 1; + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + boolean hasAgeOfLastAppliedOp(); + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + long getAgeOfLastAppliedOp(); + + // required uint64 timeStampsOfLastAppliedOp = 2; + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + boolean hasTimeStampsOfLastAppliedOp(); + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + long getTimeStampsOfLastAppliedOp(); + } + /** + * Protobuf type {@code ReplicationLoadSink} + */ + public static final class ReplicationLoadSink extends + com.google.protobuf.GeneratedMessage + implements ReplicationLoadSinkOrBuilder { + // Use ReplicationLoadSink.newBuilder() to construct. 
+ private ReplicationLoadSink(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ReplicationLoadSink(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ReplicationLoadSink defaultInstance; + public static ReplicationLoadSink getDefaultInstance() { + return defaultInstance; + } + + public ReplicationLoadSink getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ReplicationLoadSink( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + ageOfLastAppliedOp_ = input.readUInt64(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + timeStampsOfLastAppliedOp_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ReplicationLoadSink parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ReplicationLoadSink(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required uint64 ageOfLastAppliedOp = 1; + public static final int AGEOFLASTAPPLIEDOP_FIELD_NUMBER = 1; + private long ageOfLastAppliedOp_; + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public boolean hasAgeOfLastAppliedOp() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public long getAgeOfLastAppliedOp() { + return ageOfLastAppliedOp_; + } + + // required uint64 
timeStampsOfLastAppliedOp = 2; + public static final int TIMESTAMPSOFLASTAPPLIEDOP_FIELD_NUMBER = 2; + private long timeStampsOfLastAppliedOp_; + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public boolean hasTimeStampsOfLastAppliedOp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public long getTimeStampsOfLastAppliedOp() { + return timeStampsOfLastAppliedOp_; + } + + private void initFields() { + ageOfLastAppliedOp_ = 0L; + timeStampsOfLastAppliedOp_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasAgeOfLastAppliedOp()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTimeStampsOfLastAppliedOp()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, ageOfLastAppliedOp_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, timeStampsOfLastAppliedOp_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, ageOfLastAppliedOp_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, timeStampsOfLastAppliedOp_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) obj; + + boolean result = true; + result = result && (hasAgeOfLastAppliedOp() == other.hasAgeOfLastAppliedOp()); + if (hasAgeOfLastAppliedOp()) { + result = result && (getAgeOfLastAppliedOp() + == other.getAgeOfLastAppliedOp()); + } + result = result && (hasTimeStampsOfLastAppliedOp() == other.hasTimeStampsOfLastAppliedOp()); + if (hasTimeStampsOfLastAppliedOp()) { + result = result && (getTimeStampsOfLastAppliedOp() + == other.getTimeStampsOfLastAppliedOp()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasAgeOfLastAppliedOp()) { + hash = (37 * hash) + AGEOFLASTAPPLIEDOP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getAgeOfLastAppliedOp()); + } + if (hasTimeStampsOfLastAppliedOp()) { + hash = (37 * hash) + 
TIMESTAMPSOFLASTAPPLIEDOP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTimeStampsOfLastAppliedOp()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ReplicationLoadSink} + */ + public static final class Builder extends + 
com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + ageOfLastAppliedOp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + timeStampsOfLastAppliedOp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink build() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.ageOfLastAppliedOp_ = ageOfLastAppliedOp_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.timeStampsOfLastAppliedOp_ = timeStampsOfLastAppliedOp_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance()) return this; + if (other.hasAgeOfLastAppliedOp()) { + setAgeOfLastAppliedOp(other.getAgeOfLastAppliedOp()); + } + if (other.hasTimeStampsOfLastAppliedOp()) { + setTimeStampsOfLastAppliedOp(other.getTimeStampsOfLastAppliedOp()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasAgeOfLastAppliedOp()) { + + return false; + } + if (!hasTimeStampsOfLastAppliedOp()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required uint64 ageOfLastAppliedOp = 1; + private long ageOfLastAppliedOp_ ; + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public boolean hasAgeOfLastAppliedOp() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public long getAgeOfLastAppliedOp() { + return ageOfLastAppliedOp_; + } + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public Builder setAgeOfLastAppliedOp(long value) { + bitField0_ |= 0x00000001; + ageOfLastAppliedOp_ = value; + onChanged(); + return this; + } + /** + * required uint64 ageOfLastAppliedOp = 1; + */ + public Builder clearAgeOfLastAppliedOp() { + bitField0_ = (bitField0_ & ~0x00000001); + ageOfLastAppliedOp_ = 0L; + onChanged(); + return this; + } + + // required uint64 timeStampsOfLastAppliedOp = 2; + private long timeStampsOfLastAppliedOp_ ; + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public boolean hasTimeStampsOfLastAppliedOp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public long getTimeStampsOfLastAppliedOp() { + return timeStampsOfLastAppliedOp_; + } + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public Builder setTimeStampsOfLastAppliedOp(long value) { + bitField0_ |= 0x00000002; + timeStampsOfLastAppliedOp_ = value; + onChanged(); + return this; + } + /** + * required uint64 timeStampsOfLastAppliedOp = 2; + */ + public Builder clearTimeStampsOfLastAppliedOp() { + bitField0_ = (bitField0_ & ~0x00000002); + timeStampsOfLastAppliedOp_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ReplicationLoadSink) + } + + static { + defaultInstance = new ReplicationLoadSink(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ReplicationLoadSink) + } + + public interface ReplicationLoadSourceOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string peerID = 1; + /** + * required string peerID = 1; + */ + boolean hasPeerID(); + /** + * required string peerID = 1; + */ + java.lang.String getPeerID(); + /** 
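// Illustrative sketch only, not part of this patch or of the generated file:
// a minimal round trip through the ReplicationLoadSink message defined above,
// using just the builder, toByteArray() and parseFrom(byte[]) members shown in
// this class. The helper name and the field values are placeholders.
static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink
    exampleReplicationLoadSink() throws com.google.protobuf.InvalidProtocolBufferException {
  org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink sink =
      org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.newBuilder()
          .setAgeOfLastAppliedOp(0L)                    // required uint64 ageOfLastAppliedOp = 1
          .setTimeStampsOfLastAppliedOp(1425000000000L) // required uint64 timeStampsOfLastAppliedOp = 2
          .build();                                     // both required fields set, so build() succeeds
  // Serialize and re-parse with the generated parser to exercise the wire format.
  return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink
      .parseFrom(sink.toByteArray());
}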
+ * required string peerID = 1; + */ + com.google.protobuf.ByteString + getPeerIDBytes(); + + // required uint64 ageOfLastShippedOp = 2; + /** + * required uint64 ageOfLastShippedOp = 2; + */ + boolean hasAgeOfLastShippedOp(); + /** + * required uint64 ageOfLastShippedOp = 2; + */ + long getAgeOfLastShippedOp(); + + // required uint32 sizeOfLogQueue = 3; + /** + * required uint32 sizeOfLogQueue = 3; + */ + boolean hasSizeOfLogQueue(); + /** + * required uint32 sizeOfLogQueue = 3; + */ + int getSizeOfLogQueue(); + + // required uint64 timeStampOfLastShippedOp = 4; + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + boolean hasTimeStampOfLastShippedOp(); + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + long getTimeStampOfLastShippedOp(); + + // required uint64 replicationLag = 5; + /** + * required uint64 replicationLag = 5; + */ + boolean hasReplicationLag(); + /** + * required uint64 replicationLag = 5; + */ + long getReplicationLag(); + } + /** + * Protobuf type {@code ReplicationLoadSource} + */ + public static final class ReplicationLoadSource extends + com.google.protobuf.GeneratedMessage + implements ReplicationLoadSourceOrBuilder { + // Use ReplicationLoadSource.newBuilder() to construct. + private ReplicationLoadSource(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ReplicationLoadSource(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ReplicationLoadSource defaultInstance; + public static ReplicationLoadSource getDefaultInstance() { + return defaultInstance; + } + + public ReplicationLoadSource getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ReplicationLoadSource( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + peerID_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + ageOfLastShippedOp_ = input.readUInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + sizeOfLogQueue_ = input.readUInt32(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + timeStampOfLastShippedOp_ = input.readUInt64(); + break; + } + case 40: { + bitField0_ |= 0x00000010; + replicationLag_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ReplicationLoadSource parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ReplicationLoadSource(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string peerID = 1; + public static final int PEERID_FIELD_NUMBER = 1; + private java.lang.Object peerID_; + /** + * required string peerID = 1; + */ + public boolean hasPeerID() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string peerID = 1; + */ + public java.lang.String getPeerID() { + java.lang.Object ref = peerID_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + peerID_ = s; + } + return s; + } + } + /** + * required string peerID = 1; + */ + public com.google.protobuf.ByteString + getPeerIDBytes() { + java.lang.Object ref = peerID_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + peerID_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required uint64 ageOfLastShippedOp = 2; + public static final int AGEOFLASTSHIPPEDOP_FIELD_NUMBER = 2; + private long ageOfLastShippedOp_; + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public boolean hasAgeOfLastShippedOp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public long getAgeOfLastShippedOp() { + return ageOfLastShippedOp_; + } + + // required uint32 sizeOfLogQueue = 3; + public static final int SIZEOFLOGQUEUE_FIELD_NUMBER = 3; + private int sizeOfLogQueue_; + /** + * required uint32 sizeOfLogQueue = 3; + */ + public boolean hasSizeOfLogQueue() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint32 sizeOfLogQueue = 3; + */ + public int getSizeOfLogQueue() { + return sizeOfLogQueue_; + } + + // required uint64 timeStampOfLastShippedOp = 4; + public static final int TIMESTAMPOFLASTSHIPPEDOP_FIELD_NUMBER = 4; + private long timeStampOfLastShippedOp_; + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public boolean hasTimeStampOfLastShippedOp() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public long getTimeStampOfLastShippedOp() { + return timeStampOfLastShippedOp_; + } + + // required uint64 replicationLag = 5; + public static final int REPLICATIONLAG_FIELD_NUMBER = 5; + private long 
replicationLag_; + /** + * required uint64 replicationLag = 5; + */ + public boolean hasReplicationLag() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint64 replicationLag = 5; + */ + public long getReplicationLag() { + return replicationLag_; + } + + private void initFields() { + peerID_ = ""; + ageOfLastShippedOp_ = 0L; + sizeOfLogQueue_ = 0; + timeStampOfLastShippedOp_ = 0L; + replicationLag_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasPeerID()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasAgeOfLastShippedOp()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSizeOfLogQueue()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTimeStampOfLastShippedOp()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasReplicationLag()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getPeerIDBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, ageOfLastShippedOp_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt32(3, sizeOfLogQueue_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(4, timeStampOfLastShippedOp_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeUInt64(5, replicationLag_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getPeerIDBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, ageOfLastShippedOp_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(3, sizeOfLogQueue_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(4, timeStampOfLastShippedOp_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(5, replicationLag_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) obj; + + boolean result = true; + result = result && (hasPeerID() == other.hasPeerID()); + if (hasPeerID()) { + result = result && getPeerID() + 
.equals(other.getPeerID()); + } + result = result && (hasAgeOfLastShippedOp() == other.hasAgeOfLastShippedOp()); + if (hasAgeOfLastShippedOp()) { + result = result && (getAgeOfLastShippedOp() + == other.getAgeOfLastShippedOp()); + } + result = result && (hasSizeOfLogQueue() == other.hasSizeOfLogQueue()); + if (hasSizeOfLogQueue()) { + result = result && (getSizeOfLogQueue() + == other.getSizeOfLogQueue()); + } + result = result && (hasTimeStampOfLastShippedOp() == other.hasTimeStampOfLastShippedOp()); + if (hasTimeStampOfLastShippedOp()) { + result = result && (getTimeStampOfLastShippedOp() + == other.getTimeStampOfLastShippedOp()); + } + result = result && (hasReplicationLag() == other.hasReplicationLag()); + if (hasReplicationLag()) { + result = result && (getReplicationLag() + == other.getReplicationLag()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasPeerID()) { + hash = (37 * hash) + PEERID_FIELD_NUMBER; + hash = (53 * hash) + getPeerID().hashCode(); + } + if (hasAgeOfLastShippedOp()) { + hash = (37 * hash) + AGEOFLASTSHIPPEDOP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getAgeOfLastShippedOp()); + } + if (hasSizeOfLogQueue()) { + hash = (37 * hash) + SIZEOFLOGQUEUE_FIELD_NUMBER; + hash = (53 * hash) + getSizeOfLogQueue(); + } + if (hasTimeStampOfLastShippedOp()) { + hash = (37 * hash) + TIMESTAMPOFLASTSHIPPEDOP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTimeStampOfLastShippedOp()); + } + if (hasReplicationLag()) { + hash = (37 * hash) + REPLICATIONLAG_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getReplicationLag()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, 
extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ReplicationLoadSource} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + peerID_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + ageOfLastShippedOp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + sizeOfLogQueue_ = 0; + bitField0_ = (bitField0_ & ~0x00000004); + timeStampOfLastShippedOp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + replicationLag_ = 0L; + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + public Builder clone() { + return 
create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource build() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.peerID_ = peerID_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.ageOfLastShippedOp_ = ageOfLastShippedOp_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.sizeOfLogQueue_ = sizeOfLogQueue_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.timeStampOfLastShippedOp_ = timeStampOfLastShippedOp_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.replicationLag_ = replicationLag_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance()) return this; + if (other.hasPeerID()) { + bitField0_ |= 0x00000001; + peerID_ = other.peerID_; + onChanged(); + } + if (other.hasAgeOfLastShippedOp()) { + setAgeOfLastShippedOp(other.getAgeOfLastShippedOp()); + } + if (other.hasSizeOfLogQueue()) { + setSizeOfLogQueue(other.getSizeOfLogQueue()); + } + if (other.hasTimeStampOfLastShippedOp()) { + setTimeStampOfLastShippedOp(other.getTimeStampOfLastShippedOp()); + } + if (other.hasReplicationLag()) { + setReplicationLag(other.getReplicationLag()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasPeerID()) { + + return false; + } + if (!hasAgeOfLastShippedOp()) { + + return false; + } + if (!hasSizeOfLogQueue()) { + + return false; + } + if (!hasTimeStampOfLastShippedOp()) { + + return false; + } + if (!hasReplicationLag()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string peerID = 1; + private java.lang.Object peerID_ = ""; + /** + * required string peerID = 1; + */ + public boolean hasPeerID() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string peerID = 1; + */ + public java.lang.String getPeerID() { + java.lang.Object ref = peerID_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + peerID_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string peerID = 1; + */ + public com.google.protobuf.ByteString + getPeerIDBytes() { + java.lang.Object ref = peerID_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + peerID_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string peerID = 1; + */ + public Builder setPeerID( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + peerID_ = value; + onChanged(); + return this; + } + /** + * required string peerID = 1; + */ + public Builder clearPeerID() { + bitField0_ = (bitField0_ & ~0x00000001); + peerID_ = getDefaultInstance().getPeerID(); + onChanged(); + return this; + } + /** + * required string peerID = 1; + */ + public Builder setPeerIDBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + peerID_ = value; + onChanged(); + return this; + } + + // required uint64 ageOfLastShippedOp = 2; + private long ageOfLastShippedOp_ ; + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public boolean hasAgeOfLastShippedOp() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public long getAgeOfLastShippedOp() { + return ageOfLastShippedOp_; + } + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public Builder setAgeOfLastShippedOp(long value) { + bitField0_ |= 0x00000002; + ageOfLastShippedOp_ = value; + onChanged(); + return this; + } + /** + * required uint64 ageOfLastShippedOp = 2; + */ + public Builder clearAgeOfLastShippedOp() { + bitField0_ = (bitField0_ & ~0x00000002); + ageOfLastShippedOp_ = 0L; + onChanged(); + return this; + } + + // required uint32 sizeOfLogQueue = 3; + private int sizeOfLogQueue_ ; + /** + * required uint32 sizeOfLogQueue = 3; + */ + public boolean hasSizeOfLogQueue() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint32 sizeOfLogQueue = 3; + */ + public int getSizeOfLogQueue() { + return sizeOfLogQueue_; + } + /** + * required uint32 sizeOfLogQueue = 3; + */ + public Builder setSizeOfLogQueue(int value) { + bitField0_ |= 0x00000004; + sizeOfLogQueue_ = value; + onChanged(); + return this; + } + /** + * required uint32 
sizeOfLogQueue = 3; + */ + public Builder clearSizeOfLogQueue() { + bitField0_ = (bitField0_ & ~0x00000004); + sizeOfLogQueue_ = 0; + onChanged(); + return this; + } + + // required uint64 timeStampOfLastShippedOp = 4; + private long timeStampOfLastShippedOp_ ; + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public boolean hasTimeStampOfLastShippedOp() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public long getTimeStampOfLastShippedOp() { + return timeStampOfLastShippedOp_; + } + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public Builder setTimeStampOfLastShippedOp(long value) { + bitField0_ |= 0x00000008; + timeStampOfLastShippedOp_ = value; + onChanged(); + return this; + } + /** + * required uint64 timeStampOfLastShippedOp = 4; + */ + public Builder clearTimeStampOfLastShippedOp() { + bitField0_ = (bitField0_ & ~0x00000008); + timeStampOfLastShippedOp_ = 0L; + onChanged(); + return this; + } + + // required uint64 replicationLag = 5; + private long replicationLag_ ; + /** + * required uint64 replicationLag = 5; + */ + public boolean hasReplicationLag() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required uint64 replicationLag = 5; + */ + public long getReplicationLag() { + return replicationLag_; + } + /** + * required uint64 replicationLag = 5; + */ + public Builder setReplicationLag(long value) { + bitField0_ |= 0x00000010; + replicationLag_ = value; + onChanged(); + return this; + } + /** + * required uint64 replicationLag = 5; + */ + public Builder clearReplicationLag() { + bitField0_ = (bitField0_ & ~0x00000010); + replicationLag_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ReplicationLoadSource) + } + + static { + defaultInstance = new ReplicationLoadSource(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ReplicationLoadSource) + } + public interface ServerLoadOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -4685,6 +8013,85 @@ public final class ClusterStatusProtos { * */ int getInfoServerPort(); + + // repeated .ReplicationLoadSource replLoadSource = 10; + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
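// Illustrative sketch only, not part of this patch or of the generated file:
// building a ReplicationLoadSource with all five required fields, using only
// the Builder setters shown above. The peer id and numeric values are
// placeholders chosen for the example.
static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource
    exampleReplicationLoadSource() {
  return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.newBuilder()
      .setPeerID("1")                              // required string peerID = 1
      .setAgeOfLastShippedOp(0L)                   // required uint64 ageOfLastShippedOp = 2
      .setSizeOfLogQueue(0)                        // required uint32 sizeOfLogQueue = 3
      .setTimeStampOfLastShippedOp(1425000000000L) // required uint64 timeStampOfLastShippedOp = 4
      .setReplicationLag(0L)                       // required uint64 replicationLag = 5
      .build();                                    // all required fields set, so build() succeeds
}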
      +     **
      +     * The replicationLoadSource for the replication Source status of this region server.
      +     * 
      + */ + java.util.List + getReplLoadSourceList(); + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +     **
      +     * The replicationLoadSource for the replication Source status of this region server.
      +     * 
      + */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getReplLoadSource(int index); + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +     **
      +     * The replicationLoadSource for the replication Source status of this region server.
      +     * 
      + */ + int getReplLoadSourceCount(); + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +     **
      +     * The replicationLoadSource for the replication Source status of this region server.
      +     * 
      + */ + java.util.List + getReplLoadSourceOrBuilderList(); + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +     **
      +     * The replicationLoadSource for the replication Source status of this region server.
      +     * 
      + */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder getReplLoadSourceOrBuilder( + int index); + + // optional .ReplicationLoadSink replLoadSink = 11; + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
      +     **
      +     * The replicationLoadSink for the replication Sink status of this region server.
      +     * 
      + */ + boolean hasReplLoadSink(); + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
      +     **
      +     * The replicationLoadSink for the replication Sink status of this region server.
      +     * 
      + */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getReplLoadSink(); + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
      +     **
      +     * The replicationLoadSink for the replication Sink status of this region server.
      +     * 
      + */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder getReplLoadSinkOrBuilder(); } /** * Protobuf type {@code ServerLoad} @@ -4788,6 +8195,27 @@ public final class ClusterStatusProtos { infoServerPort_ = input.readUInt32(); break; } + case 82: { + if (!((mutable_bitField0_ & 0x00000200) == 0x00000200)) { + replLoadSource_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000200; + } + replLoadSource_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.PARSER, extensionRegistry)); + break; + } + case 90: { + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder subBuilder = null; + if (((bitField0_ & 0x00000080) == 0x00000080)) { + subBuilder = replLoadSink_.toBuilder(); + } + replLoadSink_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(replLoadSink_); + replLoadSink_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000080; + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -4802,6 +8230,9 @@ public final class ClusterStatusProtos { if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) { coprocessors_ = java.util.Collections.unmodifiableList(coprocessors_); } + if (((mutable_bitField0_ & 0x00000200) == 0x00000200)) { + replLoadSource_ = java.util.Collections.unmodifiableList(replLoadSource_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -5143,6 +8574,104 @@ public final class ClusterStatusProtos { return infoServerPort_; } + // repeated .ReplicationLoadSource replLoadSource = 10; + public static final int REPLLOADSOURCE_FIELD_NUMBER = 10; + private java.util.List replLoadSource_; + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +     **
      +     * The replicationLoadSource for the replication Source status of this region server.
      +     * 
      + */ + public java.util.List getReplLoadSourceList() { + return replLoadSource_; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +     **
      +     * The replicationLoadSource for the replication Source status of this region server.
      +     * 
      + */ + public java.util.List + getReplLoadSourceOrBuilderList() { + return replLoadSource_; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +     **
      +     * The replicationLoadSource for the replication Source status of this region server.
      +     * 
      + */ + public int getReplLoadSourceCount() { + return replLoadSource_.size(); + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +     **
      +     * The replicationLoadSource for the replication Source status of this region server.
      +     * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getReplLoadSource(int index) { + return replLoadSource_.get(index); + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +     **
      +     * The replicationLoadSource for the replication Source status of this region server.
      +     * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder getReplLoadSourceOrBuilder( + int index) { + return replLoadSource_.get(index); + } + + // optional .ReplicationLoadSink replLoadSink = 11; + public static final int REPLLOADSINK_FIELD_NUMBER = 11; + private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink replLoadSink_; + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
      +     **
      +     * The replicationLoadSink for the replication Sink status of this region server.
      +     * 
      + */ + public boolean hasReplLoadSink() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
      +     **
      +     * The replicationLoadSink for the replication Sink status of this region server.
      +     * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getReplLoadSink() { + return replLoadSink_; + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
      +     **
      +     * The replicationLoadSink for the replication Sink status of this region server.
      +     * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder getReplLoadSinkOrBuilder() { + return replLoadSink_; + } + private void initFields() { numberOfRequests_ = 0; totalNumberOfRequests_ = 0; @@ -5153,6 +8682,8 @@ public final class ClusterStatusProtos { reportStartTime_ = 0L; reportEndTime_ = 0L; infoServerPort_ = 0; + replLoadSource_ = java.util.Collections.emptyList(); + replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -5171,6 +8702,18 @@ public final class ClusterStatusProtos { return false; } } + for (int i = 0; i < getReplLoadSourceCount(); i++) { + if (!getReplLoadSource(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasReplLoadSink()) { + if (!getReplLoadSink().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -5205,6 +8748,12 @@ public final class ClusterStatusProtos { if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeUInt32(9, infoServerPort_); } + for (int i = 0; i < replLoadSource_.size(); i++) { + output.writeMessage(10, replLoadSource_.get(i)); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeMessage(11, replLoadSink_); + } getUnknownFields().writeTo(output); } @@ -5250,6 +8799,14 @@ public final class ClusterStatusProtos { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(9, infoServerPort_); } + for (int i = 0; i < replLoadSource_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(10, replLoadSource_.get(i)); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(11, replLoadSink_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -5312,6 +8869,13 @@ public final class ClusterStatusProtos { result = result && (getInfoServerPort() == other.getInfoServerPort()); } + result = result && getReplLoadSourceList() + .equals(other.getReplLoadSourceList()); + result = result && (hasReplLoadSink() == other.hasReplLoadSink()); + if (hasReplLoadSink()) { + result = result && getReplLoadSink() + .equals(other.getReplLoadSink()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -5361,6 +8925,14 @@ public final class ClusterStatusProtos { hash = (37 * hash) + INFO_SERVER_PORT_FIELD_NUMBER; hash = (53 * hash) + getInfoServerPort(); } + if (getReplLoadSourceCount() > 0) { + hash = (37 * hash) + REPLLOADSOURCE_FIELD_NUMBER; + hash = (53 * hash) + getReplLoadSourceList().hashCode(); + } + if (hasReplLoadSink()) { + hash = (37 * hash) + REPLLOADSINK_FIELD_NUMBER; + hash = (53 * hash) + getReplLoadSink().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -5464,6 +9036,8 @@ public final class ClusterStatusProtos { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getRegionLoadsFieldBuilder(); getCoprocessorsFieldBuilder(); + getReplLoadSourceFieldBuilder(); + getReplLoadSinkFieldBuilder(); } } private static Builder create() { @@ -5498,6 +9072,18 @@ public final class ClusterStatusProtos { bitField0_ = (bitField0_ & ~0x00000080); infoServerPort_ = 0; bitField0_ = (bitField0_ & ~0x00000100); + if (replLoadSourceBuilder_ == null) { + replLoadSource_ = 
java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000200); + } else { + replLoadSourceBuilder_.clear(); + } + if (replLoadSinkBuilder_ == null) { + replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance(); + } else { + replLoadSinkBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000400); return this; } @@ -5572,6 +9158,23 @@ public final class ClusterStatusProtos { to_bitField0_ |= 0x00000040; } result.infoServerPort_ = infoServerPort_; + if (replLoadSourceBuilder_ == null) { + if (((bitField0_ & 0x00000200) == 0x00000200)) { + replLoadSource_ = java.util.Collections.unmodifiableList(replLoadSource_); + bitField0_ = (bitField0_ & ~0x00000200); + } + result.replLoadSource_ = replLoadSource_; + } else { + result.replLoadSource_ = replLoadSourceBuilder_.build(); + } + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { + to_bitField0_ |= 0x00000080; + } + if (replLoadSinkBuilder_ == null) { + result.replLoadSink_ = replLoadSink_; + } else { + result.replLoadSink_ = replLoadSinkBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -5661,6 +9264,35 @@ public final class ClusterStatusProtos { if (other.hasInfoServerPort()) { setInfoServerPort(other.getInfoServerPort()); } + if (replLoadSourceBuilder_ == null) { + if (!other.replLoadSource_.isEmpty()) { + if (replLoadSource_.isEmpty()) { + replLoadSource_ = other.replLoadSource_; + bitField0_ = (bitField0_ & ~0x00000200); + } else { + ensureReplLoadSourceIsMutable(); + replLoadSource_.addAll(other.replLoadSource_); + } + onChanged(); + } + } else { + if (!other.replLoadSource_.isEmpty()) { + if (replLoadSourceBuilder_.isEmpty()) { + replLoadSourceBuilder_.dispose(); + replLoadSourceBuilder_ = null; + replLoadSource_ = other.replLoadSource_; + bitField0_ = (bitField0_ & ~0x00000200); + replLoadSourceBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getReplLoadSourceFieldBuilder() : null; + } else { + replLoadSourceBuilder_.addAllMessages(other.replLoadSource_); + } + } + } + if (other.hasReplLoadSink()) { + mergeReplLoadSink(other.getReplLoadSink()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -5678,6 +9310,18 @@ public final class ClusterStatusProtos { return false; } } + for (int i = 0; i < getReplLoadSourceCount(); i++) { + if (!getReplLoadSource(i).isInitialized()) { + + return false; + } + } + if (hasReplLoadSink()) { + if (!getReplLoadSink().isInitialized()) { + + return false; + } + } return true; } @@ -6749,6 +10393,498 @@ public final class ClusterStatusProtos { return this; } + // repeated .ReplicationLoadSource replLoadSource = 10; + private java.util.List replLoadSource_ = + java.util.Collections.emptyList(); + private void ensureReplLoadSourceIsMutable() { + if (!((bitField0_ & 0x00000200) == 0x00000200)) { + replLoadSource_ = new java.util.ArrayList(replLoadSource_); + bitField0_ |= 0x00000200; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder> replLoadSourceBuilder_; + + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
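// Illustrative sketch only, not part of this patch: attaching the new
// replication metrics to a ServerLoad builder through the accessors added in
// this hunk (addReplLoadSource for repeated field 10, setReplLoadSink for
// optional field 11). exampleReplicationLoadSource() and
// exampleReplicationLoadSink() are the placeholder helpers sketched earlier;
// a real region server would fill these from its replication source/sink
// metrics. This assumes, as the isInitialized() checks above suggest, that the
// pre-existing ServerLoad fields are optional and may be left unset.
static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad
    exampleServerLoad() throws com.google.protobuf.InvalidProtocolBufferException {
  return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder()
      .addReplLoadSource(exampleReplicationLoadSource()) // one entry per replication peer
      .setReplLoadSink(exampleReplicationLoadSink())     // single sink-side status
      .build();
}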
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public java.util.List getReplLoadSourceList() { + if (replLoadSourceBuilder_ == null) { + return java.util.Collections.unmodifiableList(replLoadSource_); + } else { + return replLoadSourceBuilder_.getMessageList(); + } + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public int getReplLoadSourceCount() { + if (replLoadSourceBuilder_ == null) { + return replLoadSource_.size(); + } else { + return replLoadSourceBuilder_.getCount(); + } + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getReplLoadSource(int index) { + if (replLoadSourceBuilder_ == null) { + return replLoadSource_.get(index); + } else { + return replLoadSourceBuilder_.getMessage(index); + } + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public Builder setReplLoadSource( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource value) { + if (replLoadSourceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplLoadSourceIsMutable(); + replLoadSource_.set(index, value); + onChanged(); + } else { + replLoadSourceBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public Builder setReplLoadSource( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder builderForValue) { + if (replLoadSourceBuilder_ == null) { + ensureReplLoadSourceIsMutable(); + replLoadSource_.set(index, builderForValue.build()); + onChanged(); + } else { + replLoadSourceBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public Builder addReplLoadSource(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource value) { + if (replLoadSourceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplLoadSourceIsMutable(); + replLoadSource_.add(value); + onChanged(); + } else { + replLoadSourceBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public Builder addReplLoadSource( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource value) { + if (replLoadSourceBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureReplLoadSourceIsMutable(); + replLoadSource_.add(index, value); + onChanged(); + } else { + replLoadSourceBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public Builder addReplLoadSource( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder builderForValue) { + if (replLoadSourceBuilder_ == null) { + ensureReplLoadSourceIsMutable(); + replLoadSource_.add(builderForValue.build()); + onChanged(); + } else { + replLoadSourceBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public Builder addReplLoadSource( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder builderForValue) { + if (replLoadSourceBuilder_ == null) { + ensureReplLoadSourceIsMutable(); + replLoadSource_.add(index, builderForValue.build()); + onChanged(); + } else { + replLoadSourceBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public Builder addAllReplLoadSource( + java.lang.Iterable values) { + if (replLoadSourceBuilder_ == null) { + ensureReplLoadSourceIsMutable(); + super.addAll(values, replLoadSource_); + onChanged(); + } else { + replLoadSourceBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public Builder clearReplLoadSource() { + if (replLoadSourceBuilder_ == null) { + replLoadSource_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000200); + onChanged(); + } else { + replLoadSourceBuilder_.clear(); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public Builder removeReplLoadSource(int index) { + if (replLoadSourceBuilder_ == null) { + ensureReplLoadSourceIsMutable(); + replLoadSource_.remove(index); + onChanged(); + } else { + replLoadSourceBuilder_.remove(index); + } + return this; + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder getReplLoadSourceBuilder( + int index) { + return getReplLoadSourceFieldBuilder().getBuilder(index); + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder getReplLoadSourceOrBuilder( + int index) { + if (replLoadSourceBuilder_ == null) { + return replLoadSource_.get(index); } else { + return replLoadSourceBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public java.util.List + getReplLoadSourceOrBuilderList() { + if (replLoadSourceBuilder_ != null) { + return replLoadSourceBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(replLoadSource_); + } + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder addReplLoadSourceBuilder() { + return getReplLoadSourceFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance()); + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder addReplLoadSourceBuilder( + int index) { + return getReplLoadSourceFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance()); + } + /** + * repeated .ReplicationLoadSource replLoadSource = 10; + * + *
      +       **
      +       * The replicationLoadSource for the replication Source status of this region server.
      +       * 
      + */ + public java.util.List + getReplLoadSourceBuilderList() { + return getReplLoadSourceFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder> + getReplLoadSourceFieldBuilder() { + if (replLoadSourceBuilder_ == null) { + replLoadSourceBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder>( + replLoadSource_, + ((bitField0_ & 0x00000200) == 0x00000200), + getParentForChildren(), + isClean()); + replLoadSource_ = null; + } + return replLoadSourceBuilder_; + } + + // optional .ReplicationLoadSink replLoadSink = 11; + private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder> replLoadSinkBuilder_; + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
      +       **
      +       * The replicationLoadSink for the replication Sink status of this region server.
      +       * </pre>
      + */ + public boolean hasReplLoadSink() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
      +       **
      +       * The replicationLoadSink for the replication Sink status of this region server.
      +       * </pre>
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getReplLoadSink() { + if (replLoadSinkBuilder_ == null) { + return replLoadSink_; + } else { + return replLoadSinkBuilder_.getMessage(); + } + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
      +       **
      +       * The replicationLoadSink for the replication Sink status of this region server.
      +       * </pre>
      + */ + public Builder setReplLoadSink(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink value) { + if (replLoadSinkBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + replLoadSink_ = value; + onChanged(); + } else { + replLoadSinkBuilder_.setMessage(value); + } + bitField0_ |= 0x00000400; + return this; + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
      +       **
      +       * The replicationLoadSink for the replication Sink status of this region server.
      +       * </pre>
      + */ + public Builder setReplLoadSink( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder builderForValue) { + if (replLoadSinkBuilder_ == null) { + replLoadSink_ = builderForValue.build(); + onChanged(); + } else { + replLoadSinkBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000400; + return this; + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
      +       **
      +       * The replicationLoadSink for the replication Sink status of this region server.
      +       * </pre>
      + */ + public Builder mergeReplLoadSink(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink value) { + if (replLoadSinkBuilder_ == null) { + if (((bitField0_ & 0x00000400) == 0x00000400) && + replLoadSink_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance()) { + replLoadSink_ = + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.newBuilder(replLoadSink_).mergeFrom(value).buildPartial(); + } else { + replLoadSink_ = value; + } + onChanged(); + } else { + replLoadSinkBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000400; + return this; + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
      +       **
      +       * The replicationLoadSink for the replication Sink status of this region server.
      +       * </pre>
      + */ + public Builder clearReplLoadSink() { + if (replLoadSinkBuilder_ == null) { + replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance(); + onChanged(); + } else { + replLoadSinkBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000400); + return this; + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
      +       **
      +       * The replicationLoadSink for the replication Sink status of this region server.
      +       * </pre>
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder getReplLoadSinkBuilder() { + bitField0_ |= 0x00000400; + onChanged(); + return getReplLoadSinkFieldBuilder().getBuilder(); + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
      +       **
      +       * The replicationLoadSink for the replication Sink status of this region server.
      +       * </pre>
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder getReplLoadSinkOrBuilder() { + if (replLoadSinkBuilder_ != null) { + return replLoadSinkBuilder_.getMessageOrBuilder(); + } else { + return replLoadSink_; + } + } + /** + * optional .ReplicationLoadSink replLoadSink = 11; + * + *
      +       **
      +       * The replicationLoadSink for the replication Sink status of this region server.
      +       * </pre>
      + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder> + getReplLoadSinkFieldBuilder() { + if (replLoadSinkBuilder_ == null) { + replLoadSinkBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder>( + replLoadSink_, + getParentForChildren(), + isClean()); + replLoadSink_ = null; + } + return replLoadSinkBuilder_; + } + // @@protoc_insertion_point(builder_scope:ServerLoad) } @@ -10521,11 +14657,31 @@ public final class ClusterStatusProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_RegionInTransition_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_StoreSequenceId_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_StoreSequenceId_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_RegionStoreSequenceIds_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_RegionStoreSequenceIds_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_RegionLoad_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_RegionLoad_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ReplicationLoadSink_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ReplicationLoadSink_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ReplicationLoadSource_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ReplicationLoadSource_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_ServerLoad_descriptor; private static @@ -10562,39 +14718,53 @@ public final class ClusterStatusProtos { "PLITTING_NEW\020\r\022\017\n\013MERGING_NEW\020\016\"X\n\022Regio", "nInTransition\022\036\n\004spec\030\001 \002(\0132\020.RegionSpec" + "ifier\022\"\n\014region_state\030\002 \002(\0132\014.RegionStat" + - "e\"\214\004\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" + - "(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\n" + - "storefiles\030\003 \001(\r\022\"\n\032store_uncompressed_s" + - "ize_MB\030\004 \001(\r\022\031\n\021storefile_size_MB\030\005 \001(\r\022" + - "\030\n\020memstore_size_MB\030\006 \001(\r\022\037\n\027storefile_i" + - "ndex_size_MB\030\007 \001(\r\022\033\n\023read_requests_coun" + - "t\030\010 \001(\004\022\034\n\024write_requests_count\030\t \001(\004\022\034\n" + - "\024total_compacting_KVs\030\n \001(\004\022\035\n\025current_c", - "ompacted_KVs\030\013 \001(\004\022\032\n\022root_index_size_KB" + - "\030\014 \001(\r\022\"\n\032total_static_index_size_KB\030\r \001" + - "(\r\022\"\n\032total_static_bloom_size_KB\030\016 \001(\r\022\034" + - "\n\024complete_sequence_id\030\017 
\001(\004\022\025\n\rdata_loc" + - "ality\030\020 \001(\002\022#\n\030last_major_compaction_ts\030" + - "\021 \001(\004:\0010\"\212\002\n\nServerLoad\022\032\n\022number_of_req" + - "uests\030\001 \001(\r\022 \n\030total_number_of_requests\030" + - "\002 \001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_heap_" + - "MB\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.RegionL" + - "oad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocessor\022", - "\031\n\021report_start_time\030\007 \001(\004\022\027\n\017report_end" + - "_time\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r\"O\n" + - "\016LiveServerInfo\022\033\n\006server\030\001 \002(\0132\013.Server" + - "Name\022 \n\013server_load\030\002 \002(\0132\013.ServerLoad\"\340" + - "\002\n\rClusterStatus\022/\n\rhbase_version\030\001 \001(\0132" + - "\030.HBaseVersionFileContent\022%\n\014live_server" + - "s\030\002 \003(\0132\017.LiveServerInfo\022!\n\014dead_servers" + - "\030\003 \003(\0132\013.ServerName\0222\n\025regions_in_transi" + - "tion\030\004 \003(\0132\023.RegionInTransition\022\036\n\nclust" + - "er_id\030\005 \001(\0132\n.ClusterId\022)\n\023master_coproc", - "essors\030\006 \003(\0132\014.Coprocessor\022\033\n\006master\030\007 \001" + - "(\0132\013.ServerName\022#\n\016backup_masters\030\010 \003(\0132" + - "\013.ServerName\022\023\n\013balancer_on\030\t \001(\010BF\n*org" + - ".apache.hadoop.hbase.protobuf.generatedB" + - "\023ClusterStatusProtosH\001\240\001\001" + "e\";\n\017StoreSequenceId\022\023\n\013family_name\030\001 \002(" + + "\014\022\023\n\013sequence_id\030\002 \002(\004\"g\n\026RegionStoreSeq" + + "uenceIds\022 \n\030last_flushed_sequence_id\030\001 \002" + + "(\004\022+\n\021store_sequence_id\030\002 \003(\0132\020.StoreSeq" + + "uenceId\"\302\004\n\nRegionLoad\022*\n\020region_specifi" + + "er\030\001 \002(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001" + + "(\r\022\022\n\nstorefiles\030\003 \001(\r\022\"\n\032store_uncompre" + + "ssed_size_MB\030\004 \001(\r\022\031\n\021storefile_size_MB\030", + "\005 \001(\r\022\030\n\020memstore_size_MB\030\006 \001(\r\022\037\n\027store" + + "file_index_size_MB\030\007 \001(\r\022\033\n\023read_request" + + "s_count\030\010 \001(\004\022\034\n\024write_requests_count\030\t " + + "\001(\004\022\034\n\024total_compacting_KVs\030\n \001(\004\022\035\n\025cur" + + "rent_compacted_KVs\030\013 \001(\004\022\032\n\022root_index_s" + + "ize_KB\030\014 \001(\r\022\"\n\032total_static_index_size_" + + "KB\030\r \001(\r\022\"\n\032total_static_bloom_size_KB\030\016" + + " \001(\r\022\034\n\024complete_sequence_id\030\017 \001(\004\022\025\n\rda" + + "ta_locality\030\020 \001(\002\022#\n\030last_major_compacti" + + "on_ts\030\021 \001(\004:\0010\0224\n\032store_complete_sequenc", + "e_id\030\022 \003(\0132\020.StoreSequenceId\"T\n\023Replicat" + + "ionLoadSink\022\032\n\022ageOfLastAppliedOp\030\001 \002(\004\022" + + "!\n\031timeStampsOfLastAppliedOp\030\002 \002(\004\"\225\001\n\025R" + + "eplicationLoadSource\022\016\n\006peerID\030\001 \002(\t\022\032\n\022" + + "ageOfLastShippedOp\030\002 \002(\004\022\026\n\016sizeOfLogQue" + + "ue\030\003 \002(\r\022 \n\030timeStampOfLastShippedOp\030\004 \002" + + "(\004\022\026\n\016replicationLag\030\005 \002(\004\"\346\002\n\nServerLoa" + + "d\022\032\n\022number_of_requests\030\001 \001(\r\022 \n\030total_n" + + "umber_of_requests\030\002 \001(\r\022\024\n\014used_heap_MB\030" + + "\003 \001(\r\022\023\n\013max_heap_MB\030\004 \001(\r\022!\n\014region_loa", + "ds\030\005 
\003(\0132\013.RegionLoad\022\"\n\014coprocessors\030\006 " + + "\003(\0132\014.Coprocessor\022\031\n\021report_start_time\030\007" + + " \001(\004\022\027\n\017report_end_time\030\010 \001(\004\022\030\n\020info_se" + + "rver_port\030\t \001(\r\022.\n\016replLoadSource\030\n \003(\0132" + + "\026.ReplicationLoadSource\022*\n\014replLoadSink\030" + + "\013 \001(\0132\024.ReplicationLoadSink\"O\n\016LiveServe" + + "rInfo\022\033\n\006server\030\001 \002(\0132\013.ServerName\022 \n\013se" + + "rver_load\030\002 \002(\0132\013.ServerLoad\"\340\002\n\rCluster" + + "Status\022/\n\rhbase_version\030\001 \001(\0132\030.HBaseVer" + + "sionFileContent\022%\n\014live_servers\030\002 \003(\0132\017.", + "LiveServerInfo\022!\n\014dead_servers\030\003 \003(\0132\013.S" + + "erverName\0222\n\025regions_in_transition\030\004 \003(\013" + + "2\023.RegionInTransition\022\036\n\ncluster_id\030\005 \001(" + + "\0132\n.ClusterId\022)\n\023master_coprocessors\030\006 \003" + + "(\0132\014.Coprocessor\022\033\n\006master\030\007 \001(\0132\013.Serve" + + "rName\022#\n\016backup_masters\030\010 \003(\0132\013.ServerNa" + + "me\022\023\n\013balancer_on\030\t \001(\010BF\n*org.apache.ha" + + "doop.hbase.protobuf.generatedB\023ClusterSt" + + "atusProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -10613,26 +14783,50 @@ public final class ClusterStatusProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionInTransition_descriptor, new java.lang.String[] { "Spec", "RegionState", }); - internal_static_RegionLoad_descriptor = + internal_static_StoreSequenceId_descriptor = getDescriptor().getMessageTypes().get(2); + internal_static_StoreSequenceId_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_StoreSequenceId_descriptor, + new java.lang.String[] { "FamilyName", "SequenceId", }); + internal_static_RegionStoreSequenceIds_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_RegionStoreSequenceIds_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegionStoreSequenceIds_descriptor, + new java.lang.String[] { "LastFlushedSequenceId", "StoreSequenceId", }); + internal_static_RegionLoad_descriptor = + getDescriptor().getMessageTypes().get(4); internal_static_RegionLoad_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionLoad_descriptor, - new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", "LastMajorCompactionTs", }); + new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", "LastMajorCompactionTs", "StoreCompleteSequenceId", }); + internal_static_ReplicationLoadSink_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_ReplicationLoadSink_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ReplicationLoadSink_descriptor, + new java.lang.String[] { "AgeOfLastAppliedOp", "TimeStampsOfLastAppliedOp", }); + internal_static_ReplicationLoadSource_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_ReplicationLoadSource_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ReplicationLoadSource_descriptor, + new java.lang.String[] { "PeerID", "AgeOfLastShippedOp", "SizeOfLogQueue", "TimeStampOfLastShippedOp", "ReplicationLag", }); internal_static_ServerLoad_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(7); internal_static_ServerLoad_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ServerLoad_descriptor, - new java.lang.String[] { "NumberOfRequests", "TotalNumberOfRequests", "UsedHeapMB", "MaxHeapMB", "RegionLoads", "Coprocessors", "ReportStartTime", "ReportEndTime", "InfoServerPort", }); + new java.lang.String[] { "NumberOfRequests", "TotalNumberOfRequests", "UsedHeapMB", "MaxHeapMB", "RegionLoads", "Coprocessors", "ReportStartTime", "ReportEndTime", "InfoServerPort", "ReplLoadSource", "ReplLoadSink", }); internal_static_LiveServerInfo_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(8); internal_static_LiveServerInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_LiveServerInfo_descriptor, new java.lang.String[] { "Server", "ServerLoad", }); internal_static_ClusterStatus_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(9); internal_static_ClusterStatus_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ClusterStatus_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java new file mode 100644 index 00000000000..e0a4775aaee --- /dev/null +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProcedureProtos.java @@ -0,0 +1,11424 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
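For context on the ClusterStatusProtos changes above: the new repeated replLoadSource field (tag 10) and optional replLoadSink field (tag 11) on ServerLoad expose per-peer replication source metrics and sink-side apply metrics through the usual protoc-generated accessors. Below is a minimal, illustrative sketch (not part of this patch) of how the generated builder API can populate and read these fields, assuming the regenerated ClusterStatusProtos classes are on the classpath; the class name ReplicationLoadExample and the sample values are made up for illustration.

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad;

public class ReplicationLoadExample {
  public static void main(String[] args) {
    // Build one ReplicationLoadSource entry; every field is declared required in the .proto.
    ReplicationLoadSource source = ReplicationLoadSource.newBuilder()
        .setPeerID("peer1")                                   // sample peer id
        .setAgeOfLastShippedOp(1000L)
        .setSizeOfLogQueue(3)
        .setTimeStampOfLastShippedOp(System.currentTimeMillis())
        .setReplicationLag(1000L)
        .build();

    // Build the sink-side counterpart (both fields required).
    ReplicationLoadSink sink = ReplicationLoadSink.newBuilder()
        .setAgeOfLastAppliedOp(500L)
        .setTimeStampsOfLastAppliedOp(System.currentTimeMillis())
        .build();

    // Attach them to a ServerLoad via the new fields (tags 10 and 11).
    ServerLoad load = ServerLoad.newBuilder()
        .addReplLoadSource(source)
        .setReplLoadSink(sink)
        .build();

    // Read them back with the generated accessors.
    for (ReplicationLoadSource s : load.getReplLoadSourceList()) {
      System.out.println(s.getPeerID() + " lag=" + s.getReplicationLag()
          + " logQueue=" + s.getSizeOfLogQueue());
    }
    if (load.hasReplLoadSink()) {
      System.out.println("sink ageOfLastAppliedOp=" + load.getReplLoadSink().getAgeOfLastAppliedOp());
    }
  }
}

Because every ReplicationLoadSource and ReplicationLoadSink field is required, build() throws if any of them is left unset; ServerLoad itself only gains optional fields, so existing messages remain valid.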
+// source: MasterProcedure.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class MasterProcedureProtos { + private MasterProcedureProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + /** + * Protobuf enum {@code CreateTableState} + */ + public enum CreateTableState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * CREATE_TABLE_PRE_OPERATION = 1; + */ + CREATE_TABLE_PRE_OPERATION(0, 1), + /** + * CREATE_TABLE_WRITE_FS_LAYOUT = 2; + */ + CREATE_TABLE_WRITE_FS_LAYOUT(1, 2), + /** + * CREATE_TABLE_ADD_TO_META = 3; + */ + CREATE_TABLE_ADD_TO_META(2, 3), + /** + * CREATE_TABLE_ASSIGN_REGIONS = 4; + */ + CREATE_TABLE_ASSIGN_REGIONS(3, 4), + /** + * CREATE_TABLE_UPDATE_DESC_CACHE = 5; + */ + CREATE_TABLE_UPDATE_DESC_CACHE(4, 5), + /** + * CREATE_TABLE_POST_OPERATION = 6; + */ + CREATE_TABLE_POST_OPERATION(5, 6), + ; + + /** + * CREATE_TABLE_PRE_OPERATION = 1; + */ + public static final int CREATE_TABLE_PRE_OPERATION_VALUE = 1; + /** + * CREATE_TABLE_WRITE_FS_LAYOUT = 2; + */ + public static final int CREATE_TABLE_WRITE_FS_LAYOUT_VALUE = 2; + /** + * CREATE_TABLE_ADD_TO_META = 3; + */ + public static final int CREATE_TABLE_ADD_TO_META_VALUE = 3; + /** + * CREATE_TABLE_ASSIGN_REGIONS = 4; + */ + public static final int CREATE_TABLE_ASSIGN_REGIONS_VALUE = 4; + /** + * CREATE_TABLE_UPDATE_DESC_CACHE = 5; + */ + public static final int CREATE_TABLE_UPDATE_DESC_CACHE_VALUE = 5; + /** + * CREATE_TABLE_POST_OPERATION = 6; + */ + public static final int CREATE_TABLE_POST_OPERATION_VALUE = 6; + + + public final int getNumber() { return value; } + + public static CreateTableState valueOf(int value) { + switch (value) { + case 1: return CREATE_TABLE_PRE_OPERATION; + case 2: return CREATE_TABLE_WRITE_FS_LAYOUT; + case 3: return CREATE_TABLE_ADD_TO_META; + case 4: return CREATE_TABLE_ASSIGN_REGIONS; + case 5: return CREATE_TABLE_UPDATE_DESC_CACHE; + case 6: return CREATE_TABLE_POST_OPERATION; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public CreateTableState findValueByNumber(int number) { + return CreateTableState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(0); + } + + private static final CreateTableState[] VALUES = values(); + + public static CreateTableState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private CreateTableState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:CreateTableState) + } + + /** + * Protobuf enum {@code ModifyTableState} + */ + public enum ModifyTableState + implements 
com.google.protobuf.ProtocolMessageEnum { + /** + * MODIFY_TABLE_PREPARE = 1; + */ + MODIFY_TABLE_PREPARE(0, 1), + /** + * MODIFY_TABLE_PRE_OPERATION = 2; + */ + MODIFY_TABLE_PRE_OPERATION(1, 2), + /** + * MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR = 3; + */ + MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR(2, 3), + /** + * MODIFY_TABLE_REMOVE_REPLICA_COLUMN = 4; + */ + MODIFY_TABLE_REMOVE_REPLICA_COLUMN(3, 4), + /** + * MODIFY_TABLE_DELETE_FS_LAYOUT = 5; + */ + MODIFY_TABLE_DELETE_FS_LAYOUT(4, 5), + /** + * MODIFY_TABLE_POST_OPERATION = 6; + */ + MODIFY_TABLE_POST_OPERATION(5, 6), + /** + * MODIFY_TABLE_REOPEN_ALL_REGIONS = 7; + */ + MODIFY_TABLE_REOPEN_ALL_REGIONS(6, 7), + ; + + /** + * MODIFY_TABLE_PREPARE = 1; + */ + public static final int MODIFY_TABLE_PREPARE_VALUE = 1; + /** + * MODIFY_TABLE_PRE_OPERATION = 2; + */ + public static final int MODIFY_TABLE_PRE_OPERATION_VALUE = 2; + /** + * MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR = 3; + */ + public static final int MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR_VALUE = 3; + /** + * MODIFY_TABLE_REMOVE_REPLICA_COLUMN = 4; + */ + public static final int MODIFY_TABLE_REMOVE_REPLICA_COLUMN_VALUE = 4; + /** + * MODIFY_TABLE_DELETE_FS_LAYOUT = 5; + */ + public static final int MODIFY_TABLE_DELETE_FS_LAYOUT_VALUE = 5; + /** + * MODIFY_TABLE_POST_OPERATION = 6; + */ + public static final int MODIFY_TABLE_POST_OPERATION_VALUE = 6; + /** + * MODIFY_TABLE_REOPEN_ALL_REGIONS = 7; + */ + public static final int MODIFY_TABLE_REOPEN_ALL_REGIONS_VALUE = 7; + + + public final int getNumber() { return value; } + + public static ModifyTableState valueOf(int value) { + switch (value) { + case 1: return MODIFY_TABLE_PREPARE; + case 2: return MODIFY_TABLE_PRE_OPERATION; + case 3: return MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR; + case 4: return MODIFY_TABLE_REMOVE_REPLICA_COLUMN; + case 5: return MODIFY_TABLE_DELETE_FS_LAYOUT; + case 6: return MODIFY_TABLE_POST_OPERATION; + case 7: return MODIFY_TABLE_REOPEN_ALL_REGIONS; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ModifyTableState findValueByNumber(int number) { + return ModifyTableState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(1); + } + + private static final ModifyTableState[] VALUES = values(); + + public static ModifyTableState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private ModifyTableState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:ModifyTableState) + } + + /** + * Protobuf enum {@code TruncateTableState} + */ + public enum TruncateTableState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * 
TRUNCATE_TABLE_PRE_OPERATION = 1; + */ + TRUNCATE_TABLE_PRE_OPERATION(0, 1), + /** + * TRUNCATE_TABLE_REMOVE_FROM_META = 2; + */ + TRUNCATE_TABLE_REMOVE_FROM_META(1, 2), + /** + * TRUNCATE_TABLE_CLEAR_FS_LAYOUT = 3; + */ + TRUNCATE_TABLE_CLEAR_FS_LAYOUT(2, 3), + /** + * TRUNCATE_TABLE_CREATE_FS_LAYOUT = 4; + */ + TRUNCATE_TABLE_CREATE_FS_LAYOUT(3, 4), + /** + * TRUNCATE_TABLE_ADD_TO_META = 5; + */ + TRUNCATE_TABLE_ADD_TO_META(4, 5), + /** + * TRUNCATE_TABLE_ASSIGN_REGIONS = 6; + */ + TRUNCATE_TABLE_ASSIGN_REGIONS(5, 6), + /** + * TRUNCATE_TABLE_POST_OPERATION = 7; + */ + TRUNCATE_TABLE_POST_OPERATION(6, 7), + ; + + /** + * TRUNCATE_TABLE_PRE_OPERATION = 1; + */ + public static final int TRUNCATE_TABLE_PRE_OPERATION_VALUE = 1; + /** + * TRUNCATE_TABLE_REMOVE_FROM_META = 2; + */ + public static final int TRUNCATE_TABLE_REMOVE_FROM_META_VALUE = 2; + /** + * TRUNCATE_TABLE_CLEAR_FS_LAYOUT = 3; + */ + public static final int TRUNCATE_TABLE_CLEAR_FS_LAYOUT_VALUE = 3; + /** + * TRUNCATE_TABLE_CREATE_FS_LAYOUT = 4; + */ + public static final int TRUNCATE_TABLE_CREATE_FS_LAYOUT_VALUE = 4; + /** + * TRUNCATE_TABLE_ADD_TO_META = 5; + */ + public static final int TRUNCATE_TABLE_ADD_TO_META_VALUE = 5; + /** + * TRUNCATE_TABLE_ASSIGN_REGIONS = 6; + */ + public static final int TRUNCATE_TABLE_ASSIGN_REGIONS_VALUE = 6; + /** + * TRUNCATE_TABLE_POST_OPERATION = 7; + */ + public static final int TRUNCATE_TABLE_POST_OPERATION_VALUE = 7; + + + public final int getNumber() { return value; } + + public static TruncateTableState valueOf(int value) { + switch (value) { + case 1: return TRUNCATE_TABLE_PRE_OPERATION; + case 2: return TRUNCATE_TABLE_REMOVE_FROM_META; + case 3: return TRUNCATE_TABLE_CLEAR_FS_LAYOUT; + case 4: return TRUNCATE_TABLE_CREATE_FS_LAYOUT; + case 5: return TRUNCATE_TABLE_ADD_TO_META; + case 6: return TRUNCATE_TABLE_ASSIGN_REGIONS; + case 7: return TRUNCATE_TABLE_POST_OPERATION; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public TruncateTableState findValueByNumber(int number) { + return TruncateTableState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(2); + } + + private static final TruncateTableState[] VALUES = values(); + + public static TruncateTableState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private TruncateTableState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:TruncateTableState) + } + + /** + * Protobuf enum {@code DeleteTableState} + */ + public enum DeleteTableState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * DELETE_TABLE_PRE_OPERATION = 1; + */ + 
DELETE_TABLE_PRE_OPERATION(0, 1), + /** + * DELETE_TABLE_REMOVE_FROM_META = 2; + */ + DELETE_TABLE_REMOVE_FROM_META(1, 2), + /** + * DELETE_TABLE_CLEAR_FS_LAYOUT = 3; + */ + DELETE_TABLE_CLEAR_FS_LAYOUT(2, 3), + /** + * DELETE_TABLE_UPDATE_DESC_CACHE = 4; + */ + DELETE_TABLE_UPDATE_DESC_CACHE(3, 4), + /** + * DELETE_TABLE_UNASSIGN_REGIONS = 5; + */ + DELETE_TABLE_UNASSIGN_REGIONS(4, 5), + /** + * DELETE_TABLE_POST_OPERATION = 6; + */ + DELETE_TABLE_POST_OPERATION(5, 6), + ; + + /** + * DELETE_TABLE_PRE_OPERATION = 1; + */ + public static final int DELETE_TABLE_PRE_OPERATION_VALUE = 1; + /** + * DELETE_TABLE_REMOVE_FROM_META = 2; + */ + public static final int DELETE_TABLE_REMOVE_FROM_META_VALUE = 2; + /** + * DELETE_TABLE_CLEAR_FS_LAYOUT = 3; + */ + public static final int DELETE_TABLE_CLEAR_FS_LAYOUT_VALUE = 3; + /** + * DELETE_TABLE_UPDATE_DESC_CACHE = 4; + */ + public static final int DELETE_TABLE_UPDATE_DESC_CACHE_VALUE = 4; + /** + * DELETE_TABLE_UNASSIGN_REGIONS = 5; + */ + public static final int DELETE_TABLE_UNASSIGN_REGIONS_VALUE = 5; + /** + * DELETE_TABLE_POST_OPERATION = 6; + */ + public static final int DELETE_TABLE_POST_OPERATION_VALUE = 6; + + + public final int getNumber() { return value; } + + public static DeleteTableState valueOf(int value) { + switch (value) { + case 1: return DELETE_TABLE_PRE_OPERATION; + case 2: return DELETE_TABLE_REMOVE_FROM_META; + case 3: return DELETE_TABLE_CLEAR_FS_LAYOUT; + case 4: return DELETE_TABLE_UPDATE_DESC_CACHE; + case 5: return DELETE_TABLE_UNASSIGN_REGIONS; + case 6: return DELETE_TABLE_POST_OPERATION; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public DeleteTableState findValueByNumber(int number) { + return DeleteTableState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(3); + } + + private static final DeleteTableState[] VALUES = values(); + + public static DeleteTableState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private DeleteTableState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:DeleteTableState) + } + + /** + * Protobuf enum {@code AddColumnFamilyState} + */ + public enum AddColumnFamilyState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * ADD_COLUMN_FAMILY_PREPARE = 1; + */ + ADD_COLUMN_FAMILY_PREPARE(0, 1), + /** + * ADD_COLUMN_FAMILY_PRE_OPERATION = 2; + */ + ADD_COLUMN_FAMILY_PRE_OPERATION(1, 2), + /** + * ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; + */ + ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR(2, 3), + /** + * ADD_COLUMN_FAMILY_POST_OPERATION = 4; + */ + ADD_COLUMN_FAMILY_POST_OPERATION(3, 4), + /** 
+ * ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5; + */ + ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS(4, 5), + ; + + /** + * ADD_COLUMN_FAMILY_PREPARE = 1; + */ + public static final int ADD_COLUMN_FAMILY_PREPARE_VALUE = 1; + /** + * ADD_COLUMN_FAMILY_PRE_OPERATION = 2; + */ + public static final int ADD_COLUMN_FAMILY_PRE_OPERATION_VALUE = 2; + /** + * ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; + */ + public static final int ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR_VALUE = 3; + /** + * ADD_COLUMN_FAMILY_POST_OPERATION = 4; + */ + public static final int ADD_COLUMN_FAMILY_POST_OPERATION_VALUE = 4; + /** + * ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5; + */ + public static final int ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS_VALUE = 5; + + + public final int getNumber() { return value; } + + public static AddColumnFamilyState valueOf(int value) { + switch (value) { + case 1: return ADD_COLUMN_FAMILY_PREPARE; + case 2: return ADD_COLUMN_FAMILY_PRE_OPERATION; + case 3: return ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR; + case 4: return ADD_COLUMN_FAMILY_POST_OPERATION; + case 5: return ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public AddColumnFamilyState findValueByNumber(int number) { + return AddColumnFamilyState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(4); + } + + private static final AddColumnFamilyState[] VALUES = values(); + + public static AddColumnFamilyState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private AddColumnFamilyState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:AddColumnFamilyState) + } + + /** + * Protobuf enum {@code ModifyColumnFamilyState} + */ + public enum ModifyColumnFamilyState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * MODIFY_COLUMN_FAMILY_PREPARE = 1; + */ + MODIFY_COLUMN_FAMILY_PREPARE(0, 1), + /** + * MODIFY_COLUMN_FAMILY_PRE_OPERATION = 2; + */ + MODIFY_COLUMN_FAMILY_PRE_OPERATION(1, 2), + /** + * MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; + */ + MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR(2, 3), + /** + * MODIFY_COLUMN_FAMILY_POST_OPERATION = 4; + */ + MODIFY_COLUMN_FAMILY_POST_OPERATION(3, 4), + /** + * MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5; + */ + MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS(4, 5), + ; + + /** + * MODIFY_COLUMN_FAMILY_PREPARE = 1; + */ + public static final int MODIFY_COLUMN_FAMILY_PREPARE_VALUE = 1; + /** + * MODIFY_COLUMN_FAMILY_PRE_OPERATION = 2; + */ + public static final int MODIFY_COLUMN_FAMILY_PRE_OPERATION_VALUE = 2; + /** + * MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; 
+ */ + public static final int MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR_VALUE = 3; + /** + * MODIFY_COLUMN_FAMILY_POST_OPERATION = 4; + */ + public static final int MODIFY_COLUMN_FAMILY_POST_OPERATION_VALUE = 4; + /** + * MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5; + */ + public static final int MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS_VALUE = 5; + + + public final int getNumber() { return value; } + + public static ModifyColumnFamilyState valueOf(int value) { + switch (value) { + case 1: return MODIFY_COLUMN_FAMILY_PREPARE; + case 2: return MODIFY_COLUMN_FAMILY_PRE_OPERATION; + case 3: return MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR; + case 4: return MODIFY_COLUMN_FAMILY_POST_OPERATION; + case 5: return MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ModifyColumnFamilyState findValueByNumber(int number) { + return ModifyColumnFamilyState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(5); + } + + private static final ModifyColumnFamilyState[] VALUES = values(); + + public static ModifyColumnFamilyState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private ModifyColumnFamilyState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:ModifyColumnFamilyState) + } + + /** + * Protobuf enum {@code DeleteColumnFamilyState} + */ + public enum DeleteColumnFamilyState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * DELETE_COLUMN_FAMILY_PREPARE = 1; + */ + DELETE_COLUMN_FAMILY_PREPARE(0, 1), + /** + * DELETE_COLUMN_FAMILY_PRE_OPERATION = 2; + */ + DELETE_COLUMN_FAMILY_PRE_OPERATION(1, 2), + /** + * DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; + */ + DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR(2, 3), + /** + * DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT = 4; + */ + DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT(3, 4), + /** + * DELETE_COLUMN_FAMILY_POST_OPERATION = 5; + */ + DELETE_COLUMN_FAMILY_POST_OPERATION(4, 5), + /** + * DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 6; + */ + DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS(5, 6), + ; + + /** + * DELETE_COLUMN_FAMILY_PREPARE = 1; + */ + public static final int DELETE_COLUMN_FAMILY_PREPARE_VALUE = 1; + /** + * DELETE_COLUMN_FAMILY_PRE_OPERATION = 2; + */ + public static final int DELETE_COLUMN_FAMILY_PRE_OPERATION_VALUE = 2; + /** + * DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; + */ + public static final int DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR_VALUE = 3; + /** + * DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT = 4; + */ + public static final int DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT_VALUE = 4; + /** + * 
DELETE_COLUMN_FAMILY_POST_OPERATION = 5; + */ + public static final int DELETE_COLUMN_FAMILY_POST_OPERATION_VALUE = 5; + /** + * DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 6; + */ + public static final int DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS_VALUE = 6; + + + public final int getNumber() { return value; } + + public static DeleteColumnFamilyState valueOf(int value) { + switch (value) { + case 1: return DELETE_COLUMN_FAMILY_PREPARE; + case 2: return DELETE_COLUMN_FAMILY_PRE_OPERATION; + case 3: return DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR; + case 4: return DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT; + case 5: return DELETE_COLUMN_FAMILY_POST_OPERATION; + case 6: return DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public DeleteColumnFamilyState findValueByNumber(int number) { + return DeleteColumnFamilyState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(6); + } + + private static final DeleteColumnFamilyState[] VALUES = values(); + + public static DeleteColumnFamilyState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private DeleteColumnFamilyState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:DeleteColumnFamilyState) + } + + /** + * Protobuf enum {@code EnableTableState} + */ + public enum EnableTableState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * ENABLE_TABLE_PREPARE = 1; + */ + ENABLE_TABLE_PREPARE(0, 1), + /** + * ENABLE_TABLE_PRE_OPERATION = 2; + */ + ENABLE_TABLE_PRE_OPERATION(1, 2), + /** + * ENABLE_TABLE_SET_ENABLING_TABLE_STATE = 3; + */ + ENABLE_TABLE_SET_ENABLING_TABLE_STATE(2, 3), + /** + * ENABLE_TABLE_MARK_REGIONS_ONLINE = 4; + */ + ENABLE_TABLE_MARK_REGIONS_ONLINE(3, 4), + /** + * ENABLE_TABLE_SET_ENABLED_TABLE_STATE = 5; + */ + ENABLE_TABLE_SET_ENABLED_TABLE_STATE(4, 5), + /** + * ENABLE_TABLE_POST_OPERATION = 6; + */ + ENABLE_TABLE_POST_OPERATION(5, 6), + ; + + /** + * ENABLE_TABLE_PREPARE = 1; + */ + public static final int ENABLE_TABLE_PREPARE_VALUE = 1; + /** + * ENABLE_TABLE_PRE_OPERATION = 2; + */ + public static final int ENABLE_TABLE_PRE_OPERATION_VALUE = 2; + /** + * ENABLE_TABLE_SET_ENABLING_TABLE_STATE = 3; + */ + public static final int ENABLE_TABLE_SET_ENABLING_TABLE_STATE_VALUE = 3; + /** + * ENABLE_TABLE_MARK_REGIONS_ONLINE = 4; + */ + public static final int ENABLE_TABLE_MARK_REGIONS_ONLINE_VALUE = 4; + /** + * ENABLE_TABLE_SET_ENABLED_TABLE_STATE = 5; + */ + public static final int ENABLE_TABLE_SET_ENABLED_TABLE_STATE_VALUE = 5; + /** + * ENABLE_TABLE_POST_OPERATION = 6; + */ + public static final 
int ENABLE_TABLE_POST_OPERATION_VALUE = 6; + + + public final int getNumber() { return value; } + + public static EnableTableState valueOf(int value) { + switch (value) { + case 1: return ENABLE_TABLE_PREPARE; + case 2: return ENABLE_TABLE_PRE_OPERATION; + case 3: return ENABLE_TABLE_SET_ENABLING_TABLE_STATE; + case 4: return ENABLE_TABLE_MARK_REGIONS_ONLINE; + case 5: return ENABLE_TABLE_SET_ENABLED_TABLE_STATE; + case 6: return ENABLE_TABLE_POST_OPERATION; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public EnableTableState findValueByNumber(int number) { + return EnableTableState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(7); + } + + private static final EnableTableState[] VALUES = values(); + + public static EnableTableState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private EnableTableState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:EnableTableState) + } + + /** + * Protobuf enum {@code DisableTableState} + */ + public enum DisableTableState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * DISABLE_TABLE_PREPARE = 1; + */ + DISABLE_TABLE_PREPARE(0, 1), + /** + * DISABLE_TABLE_PRE_OPERATION = 2; + */ + DISABLE_TABLE_PRE_OPERATION(1, 2), + /** + * DISABLE_TABLE_SET_DISABLING_TABLE_STATE = 3; + */ + DISABLE_TABLE_SET_DISABLING_TABLE_STATE(2, 3), + /** + * DISABLE_TABLE_MARK_REGIONS_OFFLINE = 4; + */ + DISABLE_TABLE_MARK_REGIONS_OFFLINE(3, 4), + /** + * DISABLE_TABLE_SET_DISABLED_TABLE_STATE = 5; + */ + DISABLE_TABLE_SET_DISABLED_TABLE_STATE(4, 5), + /** + * DISABLE_TABLE_POST_OPERATION = 6; + */ + DISABLE_TABLE_POST_OPERATION(5, 6), + ; + + /** + * DISABLE_TABLE_PREPARE = 1; + */ + public static final int DISABLE_TABLE_PREPARE_VALUE = 1; + /** + * DISABLE_TABLE_PRE_OPERATION = 2; + */ + public static final int DISABLE_TABLE_PRE_OPERATION_VALUE = 2; + /** + * DISABLE_TABLE_SET_DISABLING_TABLE_STATE = 3; + */ + public static final int DISABLE_TABLE_SET_DISABLING_TABLE_STATE_VALUE = 3; + /** + * DISABLE_TABLE_MARK_REGIONS_OFFLINE = 4; + */ + public static final int DISABLE_TABLE_MARK_REGIONS_OFFLINE_VALUE = 4; + /** + * DISABLE_TABLE_SET_DISABLED_TABLE_STATE = 5; + */ + public static final int DISABLE_TABLE_SET_DISABLED_TABLE_STATE_VALUE = 5; + /** + * DISABLE_TABLE_POST_OPERATION = 6; + */ + public static final int DISABLE_TABLE_POST_OPERATION_VALUE = 6; + + + public final int getNumber() { return value; } + + public static DisableTableState valueOf(int value) { + switch (value) { + case 1: return DISABLE_TABLE_PREPARE; + case 2: return DISABLE_TABLE_PRE_OPERATION; + case 
3: return DISABLE_TABLE_SET_DISABLING_TABLE_STATE; + case 4: return DISABLE_TABLE_MARK_REGIONS_OFFLINE; + case 5: return DISABLE_TABLE_SET_DISABLED_TABLE_STATE; + case 6: return DISABLE_TABLE_POST_OPERATION; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public DisableTableState findValueByNumber(int number) { + return DisableTableState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(8); + } + + private static final DisableTableState[] VALUES = values(); + + public static DisableTableState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private DisableTableState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:DisableTableState) + } + + public interface CreateTableStateDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .UserInformation user_info = 1; + /** + * required .UserInformation user_info = 1; + */ + boolean hasUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder(); + + // required .TableSchema table_schema = 2; + /** + * required .TableSchema table_schema = 2; + */ + boolean hasTableSchema(); + /** + * required .TableSchema table_schema = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema(); + /** + * required .TableSchema table_schema = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder(); + + // repeated .RegionInfo region_info = 3; + /** + * repeated .RegionInfo region_info = 3; + */ + java.util.List + getRegionInfoList(); + /** + * repeated .RegionInfo region_info = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index); + /** + * repeated .RegionInfo region_info = 3; + */ + int getRegionInfoCount(); + /** + * repeated .RegionInfo region_info = 3; + */ + java.util.List + getRegionInfoOrBuilderList(); + /** + * repeated .RegionInfo region_info = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( + int index); + } + /** + * Protobuf type {@code CreateTableStateData} + */ + public static final class CreateTableStateData extends + com.google.protobuf.GeneratedMessage + implements CreateTableStateDataOrBuilder { + // Use CreateTableStateData.newBuilder() to construct. 
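The procedure state enums introduced in this new file (CreateTableState, ModifyTableState, TruncateTableState, DeleteTableState, the column-family states, EnableTableState, DisableTableState) all follow the same protoc pattern: each constant stores both its ordinal index and its wire number, getNumber() returns the number declared in MasterProcedure.proto, and the static valueOf(int) is the reverse lookup, returning null for unknown numbers. A minimal sketch of that round trip (not part of the patch; the class name ProcedureStateExample is illustrative):

import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState;

public class ProcedureStateExample {
  public static void main(String[] args) {
    // Each enum constant carries the wire number declared in the .proto file.
    CreateTableState s = CreateTableState.CREATE_TABLE_ADD_TO_META;
    int wire = s.getNumber();                         // 3

    // valueOf(int) maps a persisted number back to the constant; unknown numbers yield null.
    CreateTableState back = CreateTableState.valueOf(wire);
    System.out.println(wire + " -> " + back);         // prints "3 -> CREATE_TABLE_ADD_TO_META"
    System.out.println(CreateTableState.valueOf(99)); // prints "null"
  }
}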
+ private CreateTableStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private CreateTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final CreateTableStateData defaultInstance; + public static CreateTableStateData getDefaultInstance() { + return defaultInstance; + } + + public CreateTableStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private CreateTableStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = userInfo_.toBuilder(); + } + userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(userInfo_); + userInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = tableSchema_.toBuilder(); + } + tableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableSchema_); + tableSchema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + regionInfo_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + regionInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableStateData_fieldAccessorTable 
+ .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public CreateTableStateData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CreateTableStateData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .UserInformation user_info = 1; + public static final int USER_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + return userInfo_; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + return userInfo_; + } + + // required .TableSchema table_schema = 2; + public static final int TABLE_SCHEMA_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_; + /** + * required .TableSchema table_schema = 2; + */ + public boolean hasTableSchema() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableSchema table_schema = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() { + return tableSchema_; + } + /** + * required .TableSchema table_schema = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() { + return tableSchema_; + } + + // repeated .RegionInfo region_info = 3; + public static final int REGION_INFO_FIELD_NUMBER = 3; + private java.util.List regionInfo_; + /** + * repeated .RegionInfo region_info = 3; + */ + public java.util.List getRegionInfoList() { + return regionInfo_; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public java.util.List + getRegionInfoOrBuilderList() { + return regionInfo_; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public int getRegionInfoCount() { + return regionInfo_.size(); + } + /** + * repeated .RegionInfo region_info = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) { + return regionInfo_.get(index); + } + /** + * repeated .RegionInfo region_info = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( + int index) { + return regionInfo_.get(index); + } + + private void initFields() { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + regionInfo_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) 
return isInitialized == 1; + + if (!hasUserInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableSchema()) { + memoizedIsInitialized = 0; + return false; + } + if (!getUserInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getRegionInfoCount(); i++) { + if (!getRegionInfo(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, tableSchema_); + } + for (int i = 0; i < regionInfo_.size(); i++) { + output.writeMessage(3, regionInfo_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tableSchema_); + } + for (int i = 0; i < regionInfo_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, regionInfo_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData) obj; + + boolean result = true; + result = result && (hasUserInfo() == other.hasUserInfo()); + if (hasUserInfo()) { + result = result && getUserInfo() + .equals(other.getUserInfo()); + } + result = result && (hasTableSchema() == other.hasTableSchema()); + if (hasTableSchema()) { + result = result && getTableSchema() + .equals(other.getTableSchema()); + } + result = result && getRegionInfoList() + .equals(other.getRegionInfoList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserInfo()) { + hash = (37 * hash) + USER_INFO_FIELD_NUMBER; + hash = (53 * hash) + getUserInfo().hashCode(); + } + if (hasTableSchema()) { + hash = (37 * hash) + TABLE_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getTableSchema().hashCode(); + } + if (getRegionInfoCount() > 0) { + hash = (37 * hash) + REGION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getRegionInfoList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + 
memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code CreateTableStateData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateDataOrBuilder 
{ + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUserInfoFieldBuilder(); + getTableSchemaFieldBuilder(); + getRegionInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (tableSchemaBuilder_ == null) { + tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } else { + tableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (regionInfoBuilder_ == null) { + regionInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + regionInfoBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_CreateTableStateData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (userInfoBuilder_ == null) { + result.userInfo_ = userInfo_; + } else { + result.userInfo_ = userInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if 
(tableSchemaBuilder_ == null) { + result.tableSchema_ = tableSchema_; + } else { + result.tableSchema_ = tableSchemaBuilder_.build(); + } + if (regionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.regionInfo_ = regionInfo_; + } else { + result.regionInfo_ = regionInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData.getDefaultInstance()) return this; + if (other.hasUserInfo()) { + mergeUserInfo(other.getUserInfo()); + } + if (other.hasTableSchema()) { + mergeTableSchema(other.getTableSchema()); + } + if (regionInfoBuilder_ == null) { + if (!other.regionInfo_.isEmpty()) { + if (regionInfo_.isEmpty()) { + regionInfo_ = other.regionInfo_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureRegionInfoIsMutable(); + regionInfo_.addAll(other.regionInfo_); + } + onChanged(); + } + } else { + if (!other.regionInfo_.isEmpty()) { + if (regionInfoBuilder_.isEmpty()) { + regionInfoBuilder_.dispose(); + regionInfoBuilder_ = null; + regionInfo_ = other.regionInfo_; + bitField0_ = (bitField0_ & ~0x00000004); + regionInfoBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getRegionInfoFieldBuilder() : null; + } else { + regionInfoBuilder_.addAllMessages(other.regionInfo_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasUserInfo()) { + + return false; + } + if (!hasTableSchema()) { + + return false; + } + if (!getUserInfo().isInitialized()) { + + return false; + } + if (!getTableSchema().isInitialized()) { + + return false; + } + for (int i = 0; i < getRegionInfoCount(); i++) { + if (!getRegionInfo(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableStateData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .UserInformation user_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + if (userInfoBuilder_ == null) { + return userInfo_; + } else { + return userInfoBuilder_.getMessage(); + } + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + userInfo_ = value; + onChanged(); + } else { + userInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo( + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) { + if (userInfoBuilder_ == null) { + userInfo_ = builderForValue.build(); + onChanged(); + } else { + userInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) { + userInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial(); + } else { + userInfo_ = value; + } + 
onChanged(); + } else { + userInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder clearUserInfo() { + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + onChanged(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUserInfoFieldBuilder().getBuilder(); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + if (userInfoBuilder_ != null) { + return userInfoBuilder_.getMessageOrBuilder(); + } else { + return userInfo_; + } + } + /** + * required .UserInformation user_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> + getUserInfoFieldBuilder() { + if (userInfoBuilder_ == null) { + userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>( + userInfo_, + getParentForChildren(), + isClean()); + userInfo_ = null; + } + return userInfoBuilder_; + } + + // required .TableSchema table_schema = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> tableSchemaBuilder_; + /** + * required .TableSchema table_schema = 2; + */ + public boolean hasTableSchema() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableSchema table_schema = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() { + if (tableSchemaBuilder_ == null) { + return tableSchema_; + } else { + return tableSchemaBuilder_.getMessage(); + } + } + /** + * required .TableSchema table_schema = 2; + */ + public Builder setTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableSchema_ = value; + onChanged(); + } else { + tableSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableSchema table_schema = 2; + */ + public Builder setTableSchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (tableSchemaBuilder_ == null) { + tableSchema_ = builderForValue.build(); + onChanged(); + } else { + tableSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return 
this; + } + /** + * required .TableSchema table_schema = 2; + */ + public Builder mergeTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + tableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { + tableSchema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(tableSchema_).mergeFrom(value).buildPartial(); + } else { + tableSchema_ = value; + } + onChanged(); + } else { + tableSchemaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableSchema table_schema = 2; + */ + public Builder clearTableSchema() { + if (tableSchemaBuilder_ == null) { + tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + onChanged(); + } else { + tableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .TableSchema table_schema = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getTableSchemaBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getTableSchemaFieldBuilder().getBuilder(); + } + /** + * required .TableSchema table_schema = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() { + if (tableSchemaBuilder_ != null) { + return tableSchemaBuilder_.getMessageOrBuilder(); + } else { + return tableSchema_; + } + } + /** + * required .TableSchema table_schema = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getTableSchemaFieldBuilder() { + if (tableSchemaBuilder_ == null) { + tableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + tableSchema_, + getParentForChildren(), + isClean()); + tableSchema_ = null; + } + return tableSchemaBuilder_; + } + + // repeated .RegionInfo region_info = 3; + private java.util.List regionInfo_ = + java.util.Collections.emptyList(); + private void ensureRegionInfoIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + regionInfo_ = new java.util.ArrayList(regionInfo_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_; + + /** + * repeated .RegionInfo region_info = 3; + */ + public java.util.List getRegionInfoList() { + if (regionInfoBuilder_ == null) { + return java.util.Collections.unmodifiableList(regionInfo_); + } else { + return regionInfoBuilder_.getMessageList(); + } + } + /** + * repeated .RegionInfo region_info = 3; + */ + public int getRegionInfoCount() { + if (regionInfoBuilder_ == null) { + return regionInfo_.size(); + } else { + return regionInfoBuilder_.getCount(); + } + } + /** + * repeated .RegionInfo region_info = 3; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) { + if (regionInfoBuilder_ == null) { + return regionInfo_.get(index); + } else { + return regionInfoBuilder_.getMessage(index); + } + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder setRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoIsMutable(); + regionInfo_.set(index, value); + onChanged(); + } else { + regionInfoBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder setRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.set(index, builderForValue.build()); + onChanged(); + } else { + regionInfoBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder addRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoIsMutable(); + regionInfo_.add(value); + onChanged(); + } else { + regionInfoBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder addRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoIsMutable(); + regionInfo_.add(index, value); + onChanged(); + } else { + regionInfoBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder addRegionInfo( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.add(builderForValue.build()); + onChanged(); + } else { + regionInfoBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder addRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.add(index, builderForValue.build()); + onChanged(); + } else { + regionInfoBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder addAllRegionInfo( + java.lang.Iterable values) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + super.addAll(values, regionInfo_); + onChanged(); + } else { + regionInfoBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder clearRegionInfo() { + if (regionInfoBuilder_ == null) { + regionInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + regionInfoBuilder_.clear(); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder removeRegionInfo(int index) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.remove(index); + 
onChanged(); + } else { + regionInfoBuilder_.remove(index); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder( + int index) { + return getRegionInfoFieldBuilder().getBuilder(index); + } + /** + * repeated .RegionInfo region_info = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( + int index) { + if (regionInfoBuilder_ == null) { + return regionInfo_.get(index); } else { + return regionInfoBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .RegionInfo region_info = 3; + */ + public java.util.List + getRegionInfoOrBuilderList() { + if (regionInfoBuilder_ != null) { + return regionInfoBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(regionInfo_); + } + } + /** + * repeated .RegionInfo region_info = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder() { + return getRegionInfoFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .RegionInfo region_info = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder( + int index) { + return getRegionInfoFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .RegionInfo region_info = 3; + */ + public java.util.List + getRegionInfoBuilderList() { + return getRegionInfoFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionInfoFieldBuilder() { + if (regionInfoBuilder_ == null) { + regionInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + regionInfo_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + regionInfo_ = null; + } + return regionInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:CreateTableStateData) + } + + static { + defaultInstance = new CreateTableStateData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:CreateTableStateData) + } + + public interface ModifyTableStateDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .UserInformation user_info = 1; + /** + * required .UserInformation user_info = 1; + */ + boolean hasUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder(); + + // optional .TableSchema unmodified_table_schema = 2; + /** + * optional .TableSchema unmodified_table_schema = 2; + */ + boolean hasUnmodifiedTableSchema(); + /** + * optional .TableSchema unmodified_table_schema = 2; + */ + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema(); + /** + * optional .TableSchema unmodified_table_schema = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder(); + + // required .TableSchema modified_table_schema = 3; + /** + * required .TableSchema modified_table_schema = 3; + */ + boolean hasModifiedTableSchema(); + /** + * required .TableSchema modified_table_schema = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getModifiedTableSchema(); + /** + * required .TableSchema modified_table_schema = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getModifiedTableSchemaOrBuilder(); + + // required bool delete_column_family_in_modify = 4; + /** + * required bool delete_column_family_in_modify = 4; + */ + boolean hasDeleteColumnFamilyInModify(); + /** + * required bool delete_column_family_in_modify = 4; + */ + boolean getDeleteColumnFamilyInModify(); + } + /** + * Protobuf type {@code ModifyTableStateData} + */ + public static final class ModifyTableStateData extends + com.google.protobuf.GeneratedMessage + implements ModifyTableStateDataOrBuilder { + // Use ModifyTableStateData.newBuilder() to construct. + private ModifyTableStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ModifyTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ModifyTableStateData defaultInstance; + public static ModifyTableStateData getDefaultInstance() { + return defaultInstance; + } + + public ModifyTableStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ModifyTableStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = userInfo_.toBuilder(); + } + userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(userInfo_); + userInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = unmodifiedTableSchema_.toBuilder(); + } + unmodifiedTableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); + if (subBuilder != null) { + 
subBuilder.mergeFrom(unmodifiedTableSchema_); + unmodifiedTableSchema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = modifiedTableSchema_.toBuilder(); + } + modifiedTableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(modifiedTableSchema_); + modifiedTableSchema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + case 32: { + bitField0_ |= 0x00000008; + deleteColumnFamilyInModify_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyTableStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyTableStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ModifyTableStateData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ModifyTableStateData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .UserInformation user_info = 1; + public static final int USER_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + return userInfo_; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + return userInfo_; + } + + // optional .TableSchema unmodified_table_schema = 2; + public static final int UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_; + /** + * optional .TableSchema unmodified_table_schema = 2; + */ + public boolean hasUnmodifiedTableSchema() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .TableSchema unmodified_table_schema = 2; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() { + return unmodifiedTableSchema_; + } + /** + * optional .TableSchema unmodified_table_schema = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() { + return unmodifiedTableSchema_; + } + + // required .TableSchema modified_table_schema = 3; + public static final int MODIFIED_TABLE_SCHEMA_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema modifiedTableSchema_; + /** + * required .TableSchema modified_table_schema = 3; + */ + public boolean hasModifiedTableSchema() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .TableSchema modified_table_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getModifiedTableSchema() { + return modifiedTableSchema_; + } + /** + * required .TableSchema modified_table_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getModifiedTableSchemaOrBuilder() { + return modifiedTableSchema_; + } + + // required bool delete_column_family_in_modify = 4; + public static final int DELETE_COLUMN_FAMILY_IN_MODIFY_FIELD_NUMBER = 4; + private boolean deleteColumnFamilyInModify_; + /** + * required bool delete_column_family_in_modify = 4; + */ + public boolean hasDeleteColumnFamilyInModify() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required bool delete_column_family_in_modify = 4; + */ + public boolean getDeleteColumnFamilyInModify() { + return deleteColumnFamilyInModify_; + } + + private void initFields() { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + deleteColumnFamilyInModify_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasUserInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasModifiedTableSchema()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasDeleteColumnFamilyInModify()) { + memoizedIsInitialized = 0; + return false; + } + if (!getUserInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasUnmodifiedTableSchema()) { + if (!getUnmodifiedTableSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (!getModifiedTableSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, unmodifiedTableSchema_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, modifiedTableSchema_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBool(4, deleteColumnFamilyInModify_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + 
int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, unmodifiedTableSchema_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, modifiedTableSchema_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(4, deleteColumnFamilyInModify_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData) obj; + + boolean result = true; + result = result && (hasUserInfo() == other.hasUserInfo()); + if (hasUserInfo()) { + result = result && getUserInfo() + .equals(other.getUserInfo()); + } + result = result && (hasUnmodifiedTableSchema() == other.hasUnmodifiedTableSchema()); + if (hasUnmodifiedTableSchema()) { + result = result && getUnmodifiedTableSchema() + .equals(other.getUnmodifiedTableSchema()); + } + result = result && (hasModifiedTableSchema() == other.hasModifiedTableSchema()); + if (hasModifiedTableSchema()) { + result = result && getModifiedTableSchema() + .equals(other.getModifiedTableSchema()); + } + result = result && (hasDeleteColumnFamilyInModify() == other.hasDeleteColumnFamilyInModify()); + if (hasDeleteColumnFamilyInModify()) { + result = result && (getDeleteColumnFamilyInModify() + == other.getDeleteColumnFamilyInModify()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserInfo()) { + hash = (37 * hash) + USER_INFO_FIELD_NUMBER; + hash = (53 * hash) + getUserInfo().hashCode(); + } + if (hasUnmodifiedTableSchema()) { + hash = (37 * hash) + UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getUnmodifiedTableSchema().hashCode(); + } + if (hasModifiedTableSchema()) { + hash = (37 * hash) + MODIFIED_TABLE_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getModifiedTableSchema().hashCode(); + } + if (hasDeleteColumnFamilyInModify()) { + hash = (37 * hash) + DELETE_COLUMN_FAMILY_IN_MODIFY_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getDeleteColumnFamilyInModify()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ModifyTableStateData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyTableStateData_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyTableStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUserInfoFieldBuilder(); + getUnmodifiedTableSchemaFieldBuilder(); + getModifiedTableSchemaFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } else { + unmodifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (modifiedTableSchemaBuilder_ == null) { + modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } else { + modifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + deleteColumnFamilyInModify_ = false; + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyTableStateData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (userInfoBuilder_ == null) { + result.userInfo_ = userInfo_; + } else { + result.userInfo_ = userInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if 
(unmodifiedTableSchemaBuilder_ == null) { + result.unmodifiedTableSchema_ = unmodifiedTableSchema_; + } else { + result.unmodifiedTableSchema_ = unmodifiedTableSchemaBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (modifiedTableSchemaBuilder_ == null) { + result.modifiedTableSchema_ = modifiedTableSchema_; + } else { + result.modifiedTableSchema_ = modifiedTableSchemaBuilder_.build(); + } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.deleteColumnFamilyInModify_ = deleteColumnFamilyInModify_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData.getDefaultInstance()) return this; + if (other.hasUserInfo()) { + mergeUserInfo(other.getUserInfo()); + } + if (other.hasUnmodifiedTableSchema()) { + mergeUnmodifiedTableSchema(other.getUnmodifiedTableSchema()); + } + if (other.hasModifiedTableSchema()) { + mergeModifiedTableSchema(other.getModifiedTableSchema()); + } + if (other.hasDeleteColumnFamilyInModify()) { + setDeleteColumnFamilyInModify(other.getDeleteColumnFamilyInModify()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasUserInfo()) { + + return false; + } + if (!hasModifiedTableSchema()) { + + return false; + } + if (!hasDeleteColumnFamilyInModify()) { + + return false; + } + if (!getUserInfo().isInitialized()) { + + return false; + } + if (hasUnmodifiedTableSchema()) { + if (!getUnmodifiedTableSchema().isInitialized()) { + + return false; + } + } + if (!getModifiedTableSchema().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableStateData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .UserInformation user_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_; + /** + * required .UserInformation user_info = 1; + */ + public 
boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + if (userInfoBuilder_ == null) { + return userInfo_; + } else { + return userInfoBuilder_.getMessage(); + } + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + userInfo_ = value; + onChanged(); + } else { + userInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo( + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) { + if (userInfoBuilder_ == null) { + userInfo_ = builderForValue.build(); + onChanged(); + } else { + userInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) { + userInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial(); + } else { + userInfo_ = value; + } + onChanged(); + } else { + userInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder clearUserInfo() { + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + onChanged(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUserInfoFieldBuilder().getBuilder(); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + if (userInfoBuilder_ != null) { + return userInfoBuilder_.getMessageOrBuilder(); + } else { + return userInfo_; + } + } + /** + * required .UserInformation user_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> + getUserInfoFieldBuilder() { + if (userInfoBuilder_ == null) { + userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>( + userInfo_, + getParentForChildren(), + isClean()); + userInfo_ = null; + } + return userInfoBuilder_; + } + + // optional .TableSchema unmodified_table_schema = 2; + private 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> unmodifiedTableSchemaBuilder_; + /** + * optional .TableSchema unmodified_table_schema = 2; + */ + public boolean hasUnmodifiedTableSchema() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .TableSchema unmodified_table_schema = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() { + if (unmodifiedTableSchemaBuilder_ == null) { + return unmodifiedTableSchema_; + } else { + return unmodifiedTableSchemaBuilder_.getMessage(); + } + } + /** + * optional .TableSchema unmodified_table_schema = 2; + */ + public Builder setUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (unmodifiedTableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + unmodifiedTableSchema_ = value; + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 2; + */ + public Builder setUnmodifiedTableSchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = builderForValue.build(); + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 2; + */ + public Builder mergeUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (unmodifiedTableSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + unmodifiedTableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { + unmodifiedTableSchema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(unmodifiedTableSchema_).mergeFrom(value).buildPartial(); + } else { + unmodifiedTableSchema_ = value; + } + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 2; + */ + public Builder clearUnmodifiedTableSchema() { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getUnmodifiedTableSchemaBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getUnmodifiedTableSchemaFieldBuilder().getBuilder(); + } + /** + * optional .TableSchema unmodified_table_schema = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() { + if (unmodifiedTableSchemaBuilder_ 
!= null) { + return unmodifiedTableSchemaBuilder_.getMessageOrBuilder(); + } else { + return unmodifiedTableSchema_; + } + } + /** + * optional .TableSchema unmodified_table_schema = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getUnmodifiedTableSchemaFieldBuilder() { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + unmodifiedTableSchema_, + getParentForChildren(), + isClean()); + unmodifiedTableSchema_ = null; + } + return unmodifiedTableSchemaBuilder_; + } + + // required .TableSchema modified_table_schema = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> modifiedTableSchemaBuilder_; + /** + * required .TableSchema modified_table_schema = 3; + */ + public boolean hasModifiedTableSchema() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .TableSchema modified_table_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getModifiedTableSchema() { + if (modifiedTableSchemaBuilder_ == null) { + return modifiedTableSchema_; + } else { + return modifiedTableSchemaBuilder_.getMessage(); + } + } + /** + * required .TableSchema modified_table_schema = 3; + */ + public Builder setModifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (modifiedTableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + modifiedTableSchema_ = value; + onChanged(); + } else { + modifiedTableSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .TableSchema modified_table_schema = 3; + */ + public Builder setModifiedTableSchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (modifiedTableSchemaBuilder_ == null) { + modifiedTableSchema_ = builderForValue.build(); + onChanged(); + } else { + modifiedTableSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .TableSchema modified_table_schema = 3; + */ + public Builder mergeModifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (modifiedTableSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + modifiedTableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { + modifiedTableSchema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(modifiedTableSchema_).mergeFrom(value).buildPartial(); + } else { + modifiedTableSchema_ = value; + } + onChanged(); + } else { + 
modifiedTableSchemaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .TableSchema modified_table_schema = 3; + */ + public Builder clearModifiedTableSchema() { + if (modifiedTableSchemaBuilder_ == null) { + modifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + onChanged(); + } else { + modifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * required .TableSchema modified_table_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getModifiedTableSchemaBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getModifiedTableSchemaFieldBuilder().getBuilder(); + } + /** + * required .TableSchema modified_table_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getModifiedTableSchemaOrBuilder() { + if (modifiedTableSchemaBuilder_ != null) { + return modifiedTableSchemaBuilder_.getMessageOrBuilder(); + } else { + return modifiedTableSchema_; + } + } + /** + * required .TableSchema modified_table_schema = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getModifiedTableSchemaFieldBuilder() { + if (modifiedTableSchemaBuilder_ == null) { + modifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + modifiedTableSchema_, + getParentForChildren(), + isClean()); + modifiedTableSchema_ = null; + } + return modifiedTableSchemaBuilder_; + } + + // required bool delete_column_family_in_modify = 4; + private boolean deleteColumnFamilyInModify_ ; + /** + * required bool delete_column_family_in_modify = 4; + */ + public boolean hasDeleteColumnFamilyInModify() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required bool delete_column_family_in_modify = 4; + */ + public boolean getDeleteColumnFamilyInModify() { + return deleteColumnFamilyInModify_; + } + /** + * required bool delete_column_family_in_modify = 4; + */ + public Builder setDeleteColumnFamilyInModify(boolean value) { + bitField0_ |= 0x00000008; + deleteColumnFamilyInModify_ = value; + onChanged(); + return this; + } + /** + * required bool delete_column_family_in_modify = 4; + */ + public Builder clearDeleteColumnFamilyInModify() { + bitField0_ = (bitField0_ & ~0x00000008); + deleteColumnFamilyInModify_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ModifyTableStateData) + } + + static { + defaultInstance = new ModifyTableStateData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ModifyTableStateData) + } + + public interface TruncateTableStateDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .UserInformation user_info = 1; + /** + * required .UserInformation user_info = 1; + */ + boolean hasUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo(); + /** + * required .UserInformation user_info = 
1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder(); + + // required bool preserve_splits = 2; + /** + * required bool preserve_splits = 2; + */ + boolean hasPreserveSplits(); + /** + * required bool preserve_splits = 2; + */ + boolean getPreserveSplits(); + + // optional .TableName table_name = 3; + /** + * optional .TableName table_name = 3; + */ + boolean hasTableName(); + /** + * optional .TableName table_name = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * optional .TableName table_name = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + + // optional .TableSchema table_schema = 4; + /** + * optional .TableSchema table_schema = 4; + */ + boolean hasTableSchema(); + /** + * optional .TableSchema table_schema = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema(); + /** + * optional .TableSchema table_schema = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder(); + + // repeated .RegionInfo region_info = 5; + /** + * repeated .RegionInfo region_info = 5; + */ + java.util.List + getRegionInfoList(); + /** + * repeated .RegionInfo region_info = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index); + /** + * repeated .RegionInfo region_info = 5; + */ + int getRegionInfoCount(); + /** + * repeated .RegionInfo region_info = 5; + */ + java.util.List + getRegionInfoOrBuilderList(); + /** + * repeated .RegionInfo region_info = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( + int index); + } + /** + * Protobuf type {@code TruncateTableStateData} + */ + public static final class TruncateTableStateData extends + com.google.protobuf.GeneratedMessage + implements TruncateTableStateDataOrBuilder { + // Use TruncateTableStateData.newBuilder() to construct. 
+ private TruncateTableStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TruncateTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TruncateTableStateData defaultInstance; + public static TruncateTableStateData getDefaultInstance() { + return defaultInstance; + } + + public TruncateTableStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TruncateTableStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = userInfo_.toBuilder(); + } + userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(userInfo_); + userInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 16: { + bitField0_ |= 0x00000002; + preserveSplits_ = input.readBool(); + break; + } + case 26: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + case 34: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = tableSchema_.toBuilder(); + } + tableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableSchema_); + tableSchema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + case 42: { + if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + regionInfo_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000010; + } + regionInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) { + regionInfo_ = 
java.util.Collections.unmodifiableList(regionInfo_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_TruncateTableStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_TruncateTableStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TruncateTableStateData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TruncateTableStateData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .UserInformation user_info = 1; + public static final int USER_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + return userInfo_; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + return userInfo_; + } + + // required bool preserve_splits = 2; + public static final int PRESERVE_SPLITS_FIELD_NUMBER = 2; + private boolean preserveSplits_; + /** + * required bool preserve_splits = 2; + */ + public boolean hasPreserveSplits() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bool preserve_splits = 2; + */ + public boolean getPreserveSplits() { + return preserveSplits_; + } + + // optional .TableName table_name = 3; + public static final int TABLE_NAME_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * optional .TableName table_name = 3; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .TableName table_name = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * optional .TableName table_name = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + // optional .TableSchema table_schema = 4; + public static final int TABLE_SCHEMA_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_; + /** + * optional .TableSchema table_schema = 4; + */ + public boolean hasTableSchema() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .TableSchema table_schema = 4; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() { + return tableSchema_; + } + /** + * optional .TableSchema table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() { + return tableSchema_; + } + + // repeated .RegionInfo region_info = 5; + public static final int REGION_INFO_FIELD_NUMBER = 5; + private java.util.List regionInfo_; + /** + * repeated .RegionInfo region_info = 5; + */ + public java.util.List getRegionInfoList() { + return regionInfo_; + } + /** + * repeated .RegionInfo region_info = 5; + */ + public java.util.List + getRegionInfoOrBuilderList() { + return regionInfo_; + } + /** + * repeated .RegionInfo region_info = 5; + */ + public int getRegionInfoCount() { + return regionInfo_.size(); + } + /** + * repeated .RegionInfo region_info = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) { + return regionInfo_.get(index); + } + /** + * repeated .RegionInfo region_info = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( + int index) { + return regionInfo_.get(index); + } + + private void initFields() { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + preserveSplits_ = false; + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + regionInfo_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasUserInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasPreserveSplits()) { + memoizedIsInitialized = 0; + return false; + } + if (!getUserInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasTableName()) { + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + if (hasTableSchema()) { + if (!getTableSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getRegionInfoCount(); i++) { + if (!getRegionInfo(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBool(2, preserveSplits_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, tableName_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, tableSchema_); + } + for (int i = 0; i < regionInfo_.size(); i++) { + output.writeMessage(5, regionInfo_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += 
com.google.protobuf.CodedOutputStream + .computeBoolSize(2, preserveSplits_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, tableName_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, tableSchema_); + } + for (int i = 0; i < regionInfo_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, regionInfo_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData) obj; + + boolean result = true; + result = result && (hasUserInfo() == other.hasUserInfo()); + if (hasUserInfo()) { + result = result && getUserInfo() + .equals(other.getUserInfo()); + } + result = result && (hasPreserveSplits() == other.hasPreserveSplits()); + if (hasPreserveSplits()) { + result = result && (getPreserveSplits() + == other.getPreserveSplits()); + } + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasTableSchema() == other.hasTableSchema()); + if (hasTableSchema()) { + result = result && getTableSchema() + .equals(other.getTableSchema()); + } + result = result && getRegionInfoList() + .equals(other.getRegionInfoList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserInfo()) { + hash = (37 * hash) + USER_INFO_FIELD_NUMBER; + hash = (53 * hash) + getUserInfo().hashCode(); + } + if (hasPreserveSplits()) { + hash = (37 * hash) + PRESERVE_SPLITS_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getPreserveSplits()); + } + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasTableSchema()) { + hash = (37 * hash) + TABLE_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getTableSchema().hashCode(); + } + if (getRegionInfoCount() > 0) { + hash = (37 * hash) + REGION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getRegionInfoList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code TruncateTableStateData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_TruncateTableStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_TruncateTableStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUserInfoFieldBuilder(); + getTableNameFieldBuilder(); + getTableSchemaFieldBuilder(); + getRegionInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + preserveSplits_ = false; + bitField0_ = (bitField0_ & ~0x00000002); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + if (tableSchemaBuilder_ == null) { + tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } else { + tableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + if (regionInfoBuilder_ == null) { + regionInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + } else { + regionInfoBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_TruncateTableStateData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (userInfoBuilder_ == null) { + result.userInfo_ = userInfo_; + } else { + result.userInfo_ = userInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } 
+ result.preserveSplits_ = preserveSplits_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (tableSchemaBuilder_ == null) { + result.tableSchema_ = tableSchema_; + } else { + result.tableSchema_ = tableSchemaBuilder_.build(); + } + if (regionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { + regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_); + bitField0_ = (bitField0_ & ~0x00000010); + } + result.regionInfo_ = regionInfo_; + } else { + result.regionInfo_ = regionInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData.getDefaultInstance()) return this; + if (other.hasUserInfo()) { + mergeUserInfo(other.getUserInfo()); + } + if (other.hasPreserveSplits()) { + setPreserveSplits(other.getPreserveSplits()); + } + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (other.hasTableSchema()) { + mergeTableSchema(other.getTableSchema()); + } + if (regionInfoBuilder_ == null) { + if (!other.regionInfo_.isEmpty()) { + if (regionInfo_.isEmpty()) { + regionInfo_ = other.regionInfo_; + bitField0_ = (bitField0_ & ~0x00000010); + } else { + ensureRegionInfoIsMutable(); + regionInfo_.addAll(other.regionInfo_); + } + onChanged(); + } + } else { + if (!other.regionInfo_.isEmpty()) { + if (regionInfoBuilder_.isEmpty()) { + regionInfoBuilder_.dispose(); + regionInfoBuilder_ = null; + regionInfo_ = other.regionInfo_; + bitField0_ = (bitField0_ & ~0x00000010); + regionInfoBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getRegionInfoFieldBuilder() : null; + } else { + regionInfoBuilder_.addAllMessages(other.regionInfo_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasUserInfo()) { + + return false; + } + if (!hasPreserveSplits()) { + + return false; + } + if (!getUserInfo().isInitialized()) { + + return false; + } + if (hasTableName()) { + if (!getTableName().isInitialized()) { + + return false; + } + } + if (hasTableSchema()) { + if (!getTableSchema().isInitialized()) { + + return false; + } + } + for (int i = 0; i < getRegionInfoCount(); i++) { + if (!getRegionInfo(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableStateData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .UserInformation user_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + if (userInfoBuilder_ == null) { + return userInfo_; + } else { + return userInfoBuilder_.getMessage(); + } + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + userInfo_ = value; + onChanged(); + } else { + userInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo( + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) { + if (userInfoBuilder_ == null) { + userInfo_ = builderForValue.build(); + onChanged(); + } else { + userInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) { + userInfo_ = + 
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial(); + } else { + userInfo_ = value; + } + onChanged(); + } else { + userInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder clearUserInfo() { + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + onChanged(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUserInfoFieldBuilder().getBuilder(); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + if (userInfoBuilder_ != null) { + return userInfoBuilder_.getMessageOrBuilder(); + } else { + return userInfo_; + } + } + /** + * required .UserInformation user_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> + getUserInfoFieldBuilder() { + if (userInfoBuilder_ == null) { + userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>( + userInfo_, + getParentForChildren(), + isClean()); + userInfo_ = null; + } + return userInfoBuilder_; + } + + // required bool preserve_splits = 2; + private boolean preserveSplits_ ; + /** + * required bool preserve_splits = 2; + */ + public boolean hasPreserveSplits() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required bool preserve_splits = 2; + */ + public boolean getPreserveSplits() { + return preserveSplits_; + } + /** + * required bool preserve_splits = 2; + */ + public Builder setPreserveSplits(boolean value) { + bitField0_ |= 0x00000002; + preserveSplits_ = value; + onChanged(); + return this; + } + /** + * required bool preserve_splits = 2; + */ + public Builder clearPreserveSplits() { + bitField0_ = (bitField0_ & ~0x00000002); + preserveSplits_ = false; + onChanged(); + return this; + } + + // optional .TableName table_name = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * optional .TableName table_name = 3; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional .TableName table_name = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } 
else { + return tableNameBuilder_.getMessage(); + } + } + /** + * optional .TableName table_name = 3; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .TableName table_name = 3; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .TableName table_name = 3; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * optional .TableName table_name = 3; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * optional .TableName table_name = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * optional .TableName table_name = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * optional .TableName table_name = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // optional .TableSchema table_schema = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> tableSchemaBuilder_; + /** + * optional .TableSchema table_schema = 4; + */ + public boolean hasTableSchema() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .TableSchema table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getTableSchema() { + if (tableSchemaBuilder_ == null) { + return tableSchema_; + } else { + return tableSchemaBuilder_.getMessage(); + } + } + /** + * optional .TableSchema table_schema = 4; + */ + public Builder setTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableSchema_ = value; + onChanged(); + } else { + tableSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema table_schema = 4; + */ + public Builder setTableSchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (tableSchemaBuilder_ == null) { + tableSchema_ = builderForValue.build(); + onChanged(); + } else { + tableSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema table_schema = 4; + */ + public Builder mergeTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + tableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { + tableSchema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(tableSchema_).mergeFrom(value).buildPartial(); + } else { + tableSchema_ = value; + } + onChanged(); + } else { + tableSchemaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema table_schema = 4; + */ + public Builder clearTableSchema() { + if (tableSchemaBuilder_ == null) { + tableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + onChanged(); + } else { + tableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .TableSchema table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getTableSchemaBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getTableSchemaFieldBuilder().getBuilder(); + } + /** + * optional .TableSchema table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getTableSchemaOrBuilder() { + if (tableSchemaBuilder_ != null) { + return tableSchemaBuilder_.getMessageOrBuilder(); + } else { + return tableSchema_; + } + } + /** + * optional .TableSchema table_schema = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getTableSchemaFieldBuilder() { + if (tableSchemaBuilder_ == null) { + tableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + tableSchema_, + getParentForChildren(), + isClean()); + tableSchema_ = null; + } + return tableSchemaBuilder_; + } + + // repeated .RegionInfo region_info = 5; + private java.util.List regionInfo_ = + java.util.Collections.emptyList(); + private void ensureRegionInfoIsMutable() { + if (!((bitField0_ & 0x00000010) == 0x00000010)) { + regionInfo_ = new java.util.ArrayList(regionInfo_); + bitField0_ |= 0x00000010; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_; + + /** + * repeated .RegionInfo region_info = 5; + */ + public java.util.List getRegionInfoList() { + if (regionInfoBuilder_ == null) { + return java.util.Collections.unmodifiableList(regionInfo_); + } else { + return regionInfoBuilder_.getMessageList(); + } + } + /** + * repeated .RegionInfo region_info = 5; + */ + public int getRegionInfoCount() { + if (regionInfoBuilder_ == null) { + return regionInfo_.size(); + } else { + return regionInfoBuilder_.getCount(); + } + } + /** + * repeated .RegionInfo region_info = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) { + if (regionInfoBuilder_ == null) { + return regionInfo_.get(index); + } else { + return regionInfoBuilder_.getMessage(index); + } + } + /** + * repeated .RegionInfo region_info = 5; + */ + public Builder setRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoIsMutable(); + regionInfo_.set(index, value); + onChanged(); + } else { + regionInfoBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .RegionInfo region_info = 5; + */ + public Builder setRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.set(index, builderForValue.build()); + onChanged(); + } else { + regionInfoBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .RegionInfo region_info = 5; + */ + public Builder addRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoIsMutable(); + regionInfo_.add(value); + onChanged(); + } else { + regionInfoBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .RegionInfo region_info = 5; + */ + public Builder addRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoIsMutable(); + regionInfo_.add(index, value); + onChanged(); + } else { + regionInfoBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .RegionInfo region_info = 5; + */ + public Builder addRegionInfo( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + 
ensureRegionInfoIsMutable(); + regionInfo_.add(builderForValue.build()); + onChanged(); + } else { + regionInfoBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .RegionInfo region_info = 5; + */ + public Builder addRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.add(index, builderForValue.build()); + onChanged(); + } else { + regionInfoBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .RegionInfo region_info = 5; + */ + public Builder addAllRegionInfo( + java.lang.Iterable values) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + super.addAll(values, regionInfo_); + onChanged(); + } else { + regionInfoBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .RegionInfo region_info = 5; + */ + public Builder clearRegionInfo() { + if (regionInfoBuilder_ == null) { + regionInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000010); + onChanged(); + } else { + regionInfoBuilder_.clear(); + } + return this; + } + /** + * repeated .RegionInfo region_info = 5; + */ + public Builder removeRegionInfo(int index) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.remove(index); + onChanged(); + } else { + regionInfoBuilder_.remove(index); + } + return this; + } + /** + * repeated .RegionInfo region_info = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder( + int index) { + return getRegionInfoFieldBuilder().getBuilder(index); + } + /** + * repeated .RegionInfo region_info = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( + int index) { + if (regionInfoBuilder_ == null) { + return regionInfo_.get(index); } else { + return regionInfoBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .RegionInfo region_info = 5; + */ + public java.util.List + getRegionInfoOrBuilderList() { + if (regionInfoBuilder_ != null) { + return regionInfoBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(regionInfo_); + } + } + /** + * repeated .RegionInfo region_info = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder() { + return getRegionInfoFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .RegionInfo region_info = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder( + int index) { + return getRegionInfoFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .RegionInfo region_info = 5; + */ + public java.util.List + getRegionInfoBuilderList() { + return getRegionInfoFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionInfoFieldBuilder() { + if (regionInfoBuilder_ == null) { + regionInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + regionInfo_, + ((bitField0_ & 0x00000010) == 0x00000010), + getParentForChildren(), + isClean()); + regionInfo_ = null; + } + return regionInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:TruncateTableStateData) + } + + static { + defaultInstance = new TruncateTableStateData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:TruncateTableStateData) + } + + public interface DeleteTableStateDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .UserInformation user_info = 1; + /** + * required .UserInformation user_info = 1; + */ + boolean hasUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder(); + + // required .TableName table_name = 2; + /** + * required .TableName table_name = 2; + */ + boolean hasTableName(); + /** + * required .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * required .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + + // repeated .RegionInfo region_info = 3; + /** + * repeated .RegionInfo region_info = 3; + */ + java.util.List + getRegionInfoList(); + /** + * repeated .RegionInfo region_info = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index); + /** + * repeated .RegionInfo region_info = 3; + */ + int getRegionInfoCount(); + /** + * repeated .RegionInfo region_info = 3; + */ + java.util.List + getRegionInfoOrBuilderList(); + /** + * repeated .RegionInfo region_info = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( + int index); + } + /** + * Protobuf type {@code DeleteTableStateData} + */ + public static final class DeleteTableStateData extends + com.google.protobuf.GeneratedMessage + implements DeleteTableStateDataOrBuilder { + // Use DeleteTableStateData.newBuilder() to construct. 
+ private DeleteTableStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private DeleteTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final DeleteTableStateData defaultInstance; + public static DeleteTableStateData getDefaultInstance() { + return defaultInstance; + } + + public DeleteTableStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DeleteTableStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = userInfo_.toBuilder(); + } + userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(userInfo_); + userInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + regionInfo_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + regionInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_fieldAccessorTable + 
.ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public DeleteTableStateData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DeleteTableStateData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .UserInformation user_info = 1; + public static final int USER_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + return userInfo_; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + return userInfo_; + } + + // required .TableName table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + // repeated .RegionInfo region_info = 3; + public static final int REGION_INFO_FIELD_NUMBER = 3; + private java.util.List regionInfo_; + /** + * repeated .RegionInfo region_info = 3; + */ + public java.util.List getRegionInfoList() { + return regionInfo_; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public java.util.List + getRegionInfoOrBuilderList() { + return regionInfo_; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public int getRegionInfoCount() { + return regionInfo_.size(); + } + /** + * repeated .RegionInfo region_info = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) { + return regionInfo_.get(index); + } + /** + * repeated .RegionInfo region_info = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( + int index) { + return regionInfo_.get(index); + } + + private void initFields() { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + regionInfo_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if 
(!hasUserInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getUserInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getRegionInfoCount(); i++) { + if (!getRegionInfo(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, tableName_); + } + for (int i = 0; i < regionInfo_.size(); i++) { + output.writeMessage(3, regionInfo_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tableName_); + } + for (int i = 0; i < regionInfo_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, regionInfo_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData) obj; + + boolean result = true; + result = result && (hasUserInfo() == other.hasUserInfo()); + if (hasUserInfo()) { + result = result && getUserInfo() + .equals(other.getUserInfo()); + } + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && getRegionInfoList() + .equals(other.getRegionInfoList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserInfo()) { + hash = (37 * hash) + USER_INFO_FIELD_NUMBER; + hash = (53 * hash) + getUserInfo().hashCode(); + } + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (getRegionInfoCount() > 0) { + hash = (37 * hash) + REGION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getRegionInfoList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code DeleteTableStateData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateDataOrBuilder { + public static final 
com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUserInfoFieldBuilder(); + getTableNameFieldBuilder(); + getRegionInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (regionInfoBuilder_ == null) { + regionInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + regionInfoBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteTableStateData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (userInfoBuilder_ == null) { + result.userInfo_ = userInfo_; + } else { + result.userInfo_ = userInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (tableNameBuilder_ == null) { + 
result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (regionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.regionInfo_ = regionInfo_; + } else { + result.regionInfo_ = regionInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData.getDefaultInstance()) return this; + if (other.hasUserInfo()) { + mergeUserInfo(other.getUserInfo()); + } + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (regionInfoBuilder_ == null) { + if (!other.regionInfo_.isEmpty()) { + if (regionInfo_.isEmpty()) { + regionInfo_ = other.regionInfo_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureRegionInfoIsMutable(); + regionInfo_.addAll(other.regionInfo_); + } + onChanged(); + } + } else { + if (!other.regionInfo_.isEmpty()) { + if (regionInfoBuilder_.isEmpty()) { + regionInfoBuilder_.dispose(); + regionInfoBuilder_ = null; + regionInfo_ = other.regionInfo_; + bitField0_ = (bitField0_ & ~0x00000004); + regionInfoBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getRegionInfoFieldBuilder() : null; + } else { + regionInfoBuilder_.addAllMessages(other.regionInfo_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasUserInfo()) { + + return false; + } + if (!hasTableName()) { + + return false; + } + if (!getUserInfo().isInitialized()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + for (int i = 0; i < getRegionInfoCount(); i++) { + if (!getRegionInfo(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableStateData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .UserInformation user_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + if (userInfoBuilder_ == null) { + return userInfo_; + } else { + return userInfoBuilder_.getMessage(); + } + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + userInfo_ = value; + onChanged(); + } else { + userInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo( + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) { + if (userInfoBuilder_ == null) { + userInfo_ = builderForValue.build(); + onChanged(); + } else { + userInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) { + userInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial(); + } else { + userInfo_ = value; + } + onChanged(); + 
} else { + userInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder clearUserInfo() { + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + onChanged(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUserInfoFieldBuilder().getBuilder(); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + if (userInfoBuilder_ != null) { + return userInfoBuilder_.getMessageOrBuilder(); + } else { + return userInfo_; + } + } + /** + * required .UserInformation user_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> + getUserInfoFieldBuilder() { + if (userInfoBuilder_ == null) { + userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>( + userInfo_, + getParentForChildren(), + isClean()); + userInfo_ = null; + } + return userInfoBuilder_; + } + + // required .TableName table_name = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder 
mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .TableName table_name = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // repeated .RegionInfo region_info = 3; + private java.util.List regionInfo_ = + java.util.Collections.emptyList(); + private void ensureRegionInfoIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + regionInfo_ = new java.util.ArrayList(regionInfo_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_; + + /** + * repeated .RegionInfo region_info = 3; + */ + public java.util.List getRegionInfoList() { + if (regionInfoBuilder_ == null) { + return java.util.Collections.unmodifiableList(regionInfo_); + } else { + return regionInfoBuilder_.getMessageList(); + } + } + /** + * repeated .RegionInfo region_info = 3; + */ + public int getRegionInfoCount() { + if (regionInfoBuilder_ == null) { + return regionInfo_.size(); + } else { + return regionInfoBuilder_.getCount(); + } + } + /** + * repeated .RegionInfo region_info = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) { + if (regionInfoBuilder_ == null) { + return regionInfo_.get(index); + } else { + 
return regionInfoBuilder_.getMessage(index); + } + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder setRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoIsMutable(); + regionInfo_.set(index, value); + onChanged(); + } else { + regionInfoBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder setRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.set(index, builderForValue.build()); + onChanged(); + } else { + regionInfoBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder addRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoIsMutable(); + regionInfo_.add(value); + onChanged(); + } else { + regionInfoBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder addRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) { + if (regionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRegionInfoIsMutable(); + regionInfo_.add(index, value); + onChanged(); + } else { + regionInfoBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder addRegionInfo( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.add(builderForValue.build()); + onChanged(); + } else { + regionInfoBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder addRegionInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.add(index, builderForValue.build()); + onChanged(); + } else { + regionInfoBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder addAllRegionInfo( + java.lang.Iterable values) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + super.addAll(values, regionInfo_); + onChanged(); + } else { + regionInfoBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder clearRegionInfo() { + if (regionInfoBuilder_ == null) { + regionInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + regionInfoBuilder_.clear(); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public Builder removeRegionInfo(int index) { + if (regionInfoBuilder_ == null) { + ensureRegionInfoIsMutable(); + regionInfo_.remove(index); + onChanged(); + } else { + regionInfoBuilder_.remove(index); + } + return this; + } + /** + * repeated .RegionInfo region_info = 3; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder( + int index) { + return getRegionInfoFieldBuilder().getBuilder(index); + } + /** + * repeated .RegionInfo region_info = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder( + int index) { + if (regionInfoBuilder_ == null) { + return regionInfo_.get(index); } else { + return regionInfoBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .RegionInfo region_info = 3; + */ + public java.util.List + getRegionInfoOrBuilderList() { + if (regionInfoBuilder_ != null) { + return regionInfoBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(regionInfo_); + } + } + /** + * repeated .RegionInfo region_info = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder() { + return getRegionInfoFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .RegionInfo region_info = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder( + int index) { + return getRegionInfoFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()); + } + /** + * repeated .RegionInfo region_info = 3; + */ + public java.util.List + getRegionInfoBuilderList() { + return getRegionInfoFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> + getRegionInfoFieldBuilder() { + if (regionInfoBuilder_ == null) { + regionInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>( + regionInfo_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + regionInfo_ = null; + } + return regionInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:DeleteTableStateData) + } + + static { + defaultInstance = new DeleteTableStateData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:DeleteTableStateData) + } + + public interface AddColumnFamilyStateDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .UserInformation user_info = 1; + /** + * required .UserInformation user_info = 1; + */ + boolean hasUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder(); + + // required .TableName table_name = 2; + /** + * required .TableName table_name = 2; + */ + boolean hasTableName(); + /** + * required .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * required .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + + // 
required .ColumnFamilySchema columnfamily_schema = 3; + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + boolean hasColumnfamilySchema(); + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnfamilySchema(); + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnfamilySchemaOrBuilder(); + + // optional .TableSchema unmodified_table_schema = 4; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + boolean hasUnmodifiedTableSchema(); + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema(); + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder(); + } + /** + * Protobuf type {@code AddColumnFamilyStateData} + */ + public static final class AddColumnFamilyStateData extends + com.google.protobuf.GeneratedMessage + implements AddColumnFamilyStateDataOrBuilder { + // Use AddColumnFamilyStateData.newBuilder() to construct. + private AddColumnFamilyStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AddColumnFamilyStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AddColumnFamilyStateData defaultInstance; + public static AddColumnFamilyStateData getDefaultInstance() { + return defaultInstance; + } + + public AddColumnFamilyStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AddColumnFamilyStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = userInfo_.toBuilder(); + } + userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(userInfo_); + userInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ 
= subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = columnfamilySchema_.toBuilder(); + } + columnfamilySchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(columnfamilySchema_); + columnfamilySchema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + case 34: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = unmodifiedTableSchema_.toBuilder(); + } + unmodifiedTableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(unmodifiedTableSchema_); + unmodifiedTableSchema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_AddColumnFamilyStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_AddColumnFamilyStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AddColumnFamilyStateData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AddColumnFamilyStateData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .UserInformation user_info = 1; + public static final int USER_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + return userInfo_; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + return userInfo_; + } + + // required .TableName table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + // required .ColumnFamilySchema columnfamily_schema = 3; + public static final int COLUMNFAMILY_SCHEMA_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema columnfamilySchema_; + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public boolean hasColumnfamilySchema() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnfamilySchema() { + return columnfamilySchema_; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnfamilySchemaOrBuilder() { + return columnfamilySchema_; + } + + // optional .TableSchema unmodified_table_schema = 4; + public static final int UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public boolean hasUnmodifiedTableSchema() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() { + return unmodifiedTableSchema_; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() { + return unmodifiedTableSchema_; + } + + private void initFields() { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasUserInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasColumnfamilySchema()) { + memoizedIsInitialized = 0; + return false; + } + if (!getUserInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getColumnfamilySchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasUnmodifiedTableSchema()) { + if (!getUnmodifiedTableSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } 
+ memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, columnfamilySchema_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, unmodifiedTableSchema_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, columnfamilySchema_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, unmodifiedTableSchema_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData) obj; + + boolean result = true; + result = result && (hasUserInfo() == other.hasUserInfo()); + if (hasUserInfo()) { + result = result && getUserInfo() + .equals(other.getUserInfo()); + } + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasColumnfamilySchema() == other.hasColumnfamilySchema()); + if (hasColumnfamilySchema()) { + result = result && getColumnfamilySchema() + .equals(other.getColumnfamilySchema()); + } + result = result && (hasUnmodifiedTableSchema() == other.hasUnmodifiedTableSchema()); + if (hasUnmodifiedTableSchema()) { + result = result && getUnmodifiedTableSchema() + .equals(other.getUnmodifiedTableSchema()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserInfo()) { + hash = (37 * hash) + USER_INFO_FIELD_NUMBER; + hash = (53 * hash) + getUserInfo().hashCode(); + } + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasColumnfamilySchema()) { + hash = (37 * hash) + COLUMNFAMILY_SCHEMA_FIELD_NUMBER; + hash = (53 * 
hash) + getColumnfamilySchema().hashCode(); + } + if (hasUnmodifiedTableSchema()) { + hash = (37 * hash) + UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getUnmodifiedTableSchema().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + 
Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code AddColumnFamilyStateData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_AddColumnFamilyStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_AddColumnFamilyStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUserInfoFieldBuilder(); + getTableNameFieldBuilder(); + getColumnfamilySchemaFieldBuilder(); + getUnmodifiedTableSchemaFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (columnfamilySchemaBuilder_ == null) { + columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + } else { + columnfamilySchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } else { + unmodifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_AddColumnFamilyStateData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (userInfoBuilder_ == null) { + result.userInfo_ = userInfo_; + } else { + result.userInfo_ = userInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (columnfamilySchemaBuilder_ == null) { + result.columnfamilySchema_ = columnfamilySchema_; + } else { + result.columnfamilySchema_ = columnfamilySchemaBuilder_.build(); + } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (unmodifiedTableSchemaBuilder_ == null) { + result.unmodifiedTableSchema_ = unmodifiedTableSchema_; + } else { + result.unmodifiedTableSchema_ = unmodifiedTableSchemaBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData.getDefaultInstance()) return this; + if (other.hasUserInfo()) { + mergeUserInfo(other.getUserInfo()); + } + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (other.hasColumnfamilySchema()) { + mergeColumnfamilySchema(other.getColumnfamilySchema()); + } + if (other.hasUnmodifiedTableSchema()) { + mergeUnmodifiedTableSchema(other.getUnmodifiedTableSchema()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasUserInfo()) { + + return false; + } + if (!hasTableName()) { + + return false; + } + if (!hasColumnfamilySchema()) { + + return false; + } + if (!getUserInfo().isInitialized()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + if (!getColumnfamilySchema().isInitialized()) { + + return false; + } + if (hasUnmodifiedTableSchema()) { + if (!getUnmodifiedTableSchema().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = 
(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyStateData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .UserInformation user_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + if (userInfoBuilder_ == null) { + return userInfo_; + } else { + return userInfoBuilder_.getMessage(); + } + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + userInfo_ = value; + onChanged(); + } else { + userInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo( + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) { + if (userInfoBuilder_ == null) { + userInfo_ = builderForValue.build(); + onChanged(); + } else { + userInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) { + userInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial(); + } else { + userInfo_ = value; + } + onChanged(); + } else { + userInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder clearUserInfo() { + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + onChanged(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUserInfoFieldBuilder().getBuilder(); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + if (userInfoBuilder_ != null) { + return userInfoBuilder_.getMessageOrBuilder(); + } else { + return userInfo_; + } + } + /** + * required 
.UserInformation user_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> + getUserInfoFieldBuilder() { + if (userInfoBuilder_ == null) { + userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>( + userInfo_, + getParentForChildren(), + isClean()); + userInfo_ = null; + } + return userInfoBuilder_; + } + + // required .TableName table_name = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .TableName table_name = 2; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .TableName table_name = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // required .ColumnFamilySchema columnfamily_schema = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder> columnfamilySchemaBuilder_; + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public boolean hasColumnfamilySchema() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnfamilySchema() { + if (columnfamilySchemaBuilder_ == null) { + return columnfamilySchema_; + } else { + return columnfamilySchemaBuilder_.getMessage(); + } + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public Builder setColumnfamilySchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) { + if (columnfamilySchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + columnfamilySchema_ = value; + onChanged(); + } else { + columnfamilySchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public Builder setColumnfamilySchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder builderForValue) { + if (columnfamilySchemaBuilder_ == null) { + columnfamilySchema_ = builderForValue.build(); + onChanged(); + } else { + columnfamilySchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public Builder mergeColumnfamilySchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) { + if (columnfamilySchemaBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + columnfamilySchema_ != 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance()) { + columnfamilySchema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.newBuilder(columnfamilySchema_).mergeFrom(value).buildPartial(); + } else { + columnfamilySchema_ = value; + } + onChanged(); + } else { + columnfamilySchemaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public Builder clearColumnfamilySchema() { + if (columnfamilySchemaBuilder_ == null) { + columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + onChanged(); + } else { + columnfamilySchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder getColumnfamilySchemaBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getColumnfamilySchemaFieldBuilder().getBuilder(); + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnfamilySchemaOrBuilder() { + if (columnfamilySchemaBuilder_ != null) { + return columnfamilySchemaBuilder_.getMessageOrBuilder(); + } else { + return columnfamilySchema_; + } + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder> + getColumnfamilySchemaFieldBuilder() { + if (columnfamilySchemaBuilder_ == null) { + columnfamilySchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>( + columnfamilySchema_, + getParentForChildren(), + isClean()); + columnfamilySchema_ = null; + } + return columnfamilySchemaBuilder_; + } + + // optional .TableSchema unmodified_table_schema = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> unmodifiedTableSchemaBuilder_; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public boolean hasUnmodifiedTableSchema() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() { + if (unmodifiedTableSchemaBuilder_ == null) { + return unmodifiedTableSchema_; + } else { + return unmodifiedTableSchemaBuilder_.getMessage(); + } + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder 
setUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (unmodifiedTableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + unmodifiedTableSchema_ = value; + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder setUnmodifiedTableSchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = builderForValue.build(); + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder mergeUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (unmodifiedTableSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + unmodifiedTableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { + unmodifiedTableSchema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(unmodifiedTableSchema_).mergeFrom(value).buildPartial(); + } else { + unmodifiedTableSchema_ = value; + } + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder clearUnmodifiedTableSchema() { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getUnmodifiedTableSchemaBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getUnmodifiedTableSchemaFieldBuilder().getBuilder(); + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() { + if (unmodifiedTableSchemaBuilder_ != null) { + return unmodifiedTableSchemaBuilder_.getMessageOrBuilder(); + } else { + return unmodifiedTableSchema_; + } + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getUnmodifiedTableSchemaFieldBuilder() { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + unmodifiedTableSchema_, + getParentForChildren(), + isClean()); + unmodifiedTableSchema_ = null; + } + return unmodifiedTableSchemaBuilder_; + } + + // 
@@protoc_insertion_point(builder_scope:AddColumnFamilyStateData) + } + + static { + defaultInstance = new AddColumnFamilyStateData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:AddColumnFamilyStateData) + } + + public interface ModifyColumnFamilyStateDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .UserInformation user_info = 1; + /** + * required .UserInformation user_info = 1; + */ + boolean hasUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder(); + + // required .TableName table_name = 2; + /** + * required .TableName table_name = 2; + */ + boolean hasTableName(); + /** + * required .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * required .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + + // required .ColumnFamilySchema columnfamily_schema = 3; + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + boolean hasColumnfamilySchema(); + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnfamilySchema(); + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnfamilySchemaOrBuilder(); + + // optional .TableSchema unmodified_table_schema = 4; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + boolean hasUnmodifiedTableSchema(); + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema(); + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder(); + } + /** + * Protobuf type {@code ModifyColumnFamilyStateData} + */ + public static final class ModifyColumnFamilyStateData extends + com.google.protobuf.GeneratedMessage + implements ModifyColumnFamilyStateDataOrBuilder { + // Use ModifyColumnFamilyStateData.newBuilder() to construct. 
+ private ModifyColumnFamilyStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ModifyColumnFamilyStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ModifyColumnFamilyStateData defaultInstance; + public static ModifyColumnFamilyStateData getDefaultInstance() { + return defaultInstance; + } + + public ModifyColumnFamilyStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ModifyColumnFamilyStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = userInfo_.toBuilder(); + } + userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(userInfo_); + userInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + subBuilder = columnfamilySchema_.toBuilder(); + } + columnfamilySchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(columnfamilySchema_); + columnfamilySchema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000004; + break; + } + case 34: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = unmodifiedTableSchema_.toBuilder(); + } + unmodifiedTableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(unmodifiedTableSchema_); + unmodifiedTableSchema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException 
e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyColumnFamilyStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyColumnFamilyStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ModifyColumnFamilyStateData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ModifyColumnFamilyStateData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .UserInformation user_info = 1; + public static final int USER_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + return userInfo_; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + return userInfo_; + } + + // required .TableName table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + // required .ColumnFamilySchema columnfamily_schema = 3; + public static final int COLUMNFAMILY_SCHEMA_FIELD_NUMBER = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema columnfamilySchema_; + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public boolean hasColumnfamilySchema() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnfamilySchema() { + return columnfamilySchema_; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnfamilySchemaOrBuilder() { + return columnfamilySchema_; + } + + // optional .TableSchema unmodified_table_schema = 4; + public static final int UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public boolean hasUnmodifiedTableSchema() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() { + return unmodifiedTableSchema_; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() { + return unmodifiedTableSchema_; + } + + private void initFields() { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasUserInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasColumnfamilySchema()) { + memoizedIsInitialized = 0; + return false; + } + if (!getUserInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getColumnfamilySchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasUnmodifiedTableSchema()) { + if (!getUnmodifiedTableSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, columnfamilySchema_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, unmodifiedTableSchema_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, columnfamilySchema_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size 
+= com.google.protobuf.CodedOutputStream + .computeMessageSize(4, unmodifiedTableSchema_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData) obj; + + boolean result = true; + result = result && (hasUserInfo() == other.hasUserInfo()); + if (hasUserInfo()) { + result = result && getUserInfo() + .equals(other.getUserInfo()); + } + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasColumnfamilySchema() == other.hasColumnfamilySchema()); + if (hasColumnfamilySchema()) { + result = result && getColumnfamilySchema() + .equals(other.getColumnfamilySchema()); + } + result = result && (hasUnmodifiedTableSchema() == other.hasUnmodifiedTableSchema()); + if (hasUnmodifiedTableSchema()) { + result = result && getUnmodifiedTableSchema() + .equals(other.getUnmodifiedTableSchema()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserInfo()) { + hash = (37 * hash) + USER_INFO_FIELD_NUMBER; + hash = (53 * hash) + getUserInfo().hashCode(); + } + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasColumnfamilySchema()) { + hash = (37 * hash) + COLUMNFAMILY_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getColumnfamilySchema().hashCode(); + } + if (hasUnmodifiedTableSchema()) { + hash = (37 * hash) + UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getUnmodifiedTableSchema().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ModifyColumnFamilyStateData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyColumnFamilyStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyColumnFamilyStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData.Builder.class); + } + + // Construct using 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUserInfoFieldBuilder(); + getTableNameFieldBuilder(); + getColumnfamilySchemaFieldBuilder(); + getUnmodifiedTableSchemaFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + if (columnfamilySchemaBuilder_ == null) { + columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + } else { + columnfamilySchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } else { + unmodifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_ModifyColumnFamilyStateData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (userInfoBuilder_ == null) { + result.userInfo_ = userInfo_; + } else { + result.userInfo_ = userInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + if (columnfamilySchemaBuilder_ == null) { + 
result.columnfamilySchema_ = columnfamilySchema_; + } else { + result.columnfamilySchema_ = columnfamilySchemaBuilder_.build(); + } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + if (unmodifiedTableSchemaBuilder_ == null) { + result.unmodifiedTableSchema_ = unmodifiedTableSchema_; + } else { + result.unmodifiedTableSchema_ = unmodifiedTableSchemaBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData.getDefaultInstance()) return this; + if (other.hasUserInfo()) { + mergeUserInfo(other.getUserInfo()); + } + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (other.hasColumnfamilySchema()) { + mergeColumnfamilySchema(other.getColumnfamilySchema()); + } + if (other.hasUnmodifiedTableSchema()) { + mergeUnmodifiedTableSchema(other.getUnmodifiedTableSchema()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasUserInfo()) { + + return false; + } + if (!hasTableName()) { + + return false; + } + if (!hasColumnfamilySchema()) { + + return false; + } + if (!getUserInfo().isInitialized()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + if (!getColumnfamilySchema().isInitialized()) { + + return false; + } + if (hasUnmodifiedTableSchema()) { + if (!getUnmodifiedTableSchema().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyStateData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .UserInformation user_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + if (userInfoBuilder_ == null) { + return userInfo_; + } else { + return userInfoBuilder_.getMessage(); + } + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + userInfo_ = value; + onChanged(); + } else { + userInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo( + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) { + if (userInfoBuilder_ == null) { + userInfo_ = builderForValue.build(); + onChanged(); + } else { + userInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) { + userInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial(); + } else { + userInfo_ = value; + } + onChanged(); + } else { + userInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder clearUserInfo() { + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + onChanged(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUserInfoFieldBuilder().getBuilder(); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + if (userInfoBuilder_ != null) { + return userInfoBuilder_.getMessageOrBuilder(); + } else { + return userInfo_; + } + } + /** + * required .UserInformation user_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> + getUserInfoFieldBuilder() { + if (userInfoBuilder_ == null) { + userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>( + userInfo_, + getParentForChildren(), + isClean()); + userInfo_ = null; + } + return userInfoBuilder_; + } + + // required .TableName table_name = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + 
private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .TableName table_name = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // required .ColumnFamilySchema columnfamily_schema = 3; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder> columnfamilySchemaBuilder_; + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public boolean hasColumnfamilySchema() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema getColumnfamilySchema() { + if (columnfamilySchemaBuilder_ == null) { + return columnfamilySchema_; + } else { + return columnfamilySchemaBuilder_.getMessage(); + } + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public Builder setColumnfamilySchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) { + if (columnfamilySchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + columnfamilySchema_ = value; + onChanged(); + } else { + columnfamilySchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public Builder setColumnfamilySchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder builderForValue) { + if (columnfamilySchemaBuilder_ == null) { + columnfamilySchema_ = builderForValue.build(); + onChanged(); + } else { + columnfamilySchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public Builder mergeColumnfamilySchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema value) { + if (columnfamilySchemaBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004) && + columnfamilySchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance()) { + columnfamilySchema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.newBuilder(columnfamilySchema_).mergeFrom(value).buildPartial(); + } else { + columnfamilySchema_ = value; + } + onChanged(); + } else { + columnfamilySchemaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000004; + return this; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public Builder clearColumnfamilySchema() { + if (columnfamilySchemaBuilder_ == null) { + columnfamilySchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.getDefaultInstance(); + onChanged(); + } else { + columnfamilySchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder 
getColumnfamilySchemaBuilder() { + bitField0_ |= 0x00000004; + onChanged(); + return getColumnfamilySchemaFieldBuilder().getBuilder(); + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder getColumnfamilySchemaOrBuilder() { + if (columnfamilySchemaBuilder_ != null) { + return columnfamilySchemaBuilder_.getMessageOrBuilder(); + } else { + return columnfamilySchema_; + } + } + /** + * required .ColumnFamilySchema columnfamily_schema = 3; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder> + getColumnfamilySchemaFieldBuilder() { + if (columnfamilySchemaBuilder_ == null) { + columnfamilySchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchemaOrBuilder>( + columnfamilySchema_, + getParentForChildren(), + isClean()); + columnfamilySchema_ = null; + } + return columnfamilySchemaBuilder_; + } + + // optional .TableSchema unmodified_table_schema = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> unmodifiedTableSchemaBuilder_; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public boolean hasUnmodifiedTableSchema() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() { + if (unmodifiedTableSchemaBuilder_ == null) { + return unmodifiedTableSchema_; + } else { + return unmodifiedTableSchemaBuilder_.getMessage(); + } + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder setUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (unmodifiedTableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + unmodifiedTableSchema_ = value; + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder setUnmodifiedTableSchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = builderForValue.build(); + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder mergeUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if 
(unmodifiedTableSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + unmodifiedTableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { + unmodifiedTableSchema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(unmodifiedTableSchema_).mergeFrom(value).buildPartial(); + } else { + unmodifiedTableSchema_ = value; + } + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder clearUnmodifiedTableSchema() { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getUnmodifiedTableSchemaBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getUnmodifiedTableSchemaFieldBuilder().getBuilder(); + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() { + if (unmodifiedTableSchemaBuilder_ != null) { + return unmodifiedTableSchemaBuilder_.getMessageOrBuilder(); + } else { + return unmodifiedTableSchema_; + } + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getUnmodifiedTableSchemaFieldBuilder() { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + unmodifiedTableSchema_, + getParentForChildren(), + isClean()); + unmodifiedTableSchema_ = null; + } + return unmodifiedTableSchemaBuilder_; + } + + // @@protoc_insertion_point(builder_scope:ModifyColumnFamilyStateData) + } + + static { + defaultInstance = new ModifyColumnFamilyStateData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ModifyColumnFamilyStateData) + } + + public interface DeleteColumnFamilyStateDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .UserInformation user_info = 1; + /** + * required .UserInformation user_info = 1; + */ + boolean hasUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder(); + + // required .TableName table_name = 2; + /** + * required .TableName table_name = 2; + */ + boolean hasTableName(); + /** + * required .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * 
required .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + + // required bytes columnfamily_name = 3; + /** + * required bytes columnfamily_name = 3; + */ + boolean hasColumnfamilyName(); + /** + * required bytes columnfamily_name = 3; + */ + com.google.protobuf.ByteString getColumnfamilyName(); + + // optional .TableSchema unmodified_table_schema = 4; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + boolean hasUnmodifiedTableSchema(); + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema(); + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder(); + } + /** + * Protobuf type {@code DeleteColumnFamilyStateData} + */ + public static final class DeleteColumnFamilyStateData extends + com.google.protobuf.GeneratedMessage + implements DeleteColumnFamilyStateDataOrBuilder { + // Use DeleteColumnFamilyStateData.newBuilder() to construct. + private DeleteColumnFamilyStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private DeleteColumnFamilyStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final DeleteColumnFamilyStateData defaultInstance; + public static DeleteColumnFamilyStateData getDefaultInstance() { + return defaultInstance; + } + + public DeleteColumnFamilyStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DeleteColumnFamilyStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = userInfo_.toBuilder(); + } + userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(userInfo_); + userInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 26: { + bitField0_ |= 
0x00000004; + columnfamilyName_ = input.readBytes(); + break; + } + case 34: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000008) == 0x00000008)) { + subBuilder = unmodifiedTableSchema_.toBuilder(); + } + unmodifiedTableSchema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(unmodifiedTableSchema_); + unmodifiedTableSchema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000008; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteColumnFamilyStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteColumnFamilyStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public DeleteColumnFamilyStateData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DeleteColumnFamilyStateData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .UserInformation user_info = 1; + public static final int USER_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + return userInfo_; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + return userInfo_; + } + + // required .TableName table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + 
return tableName_; + } + + // required bytes columnfamily_name = 3; + public static final int COLUMNFAMILY_NAME_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString columnfamilyName_; + /** + * required bytes columnfamily_name = 3; + */ + public boolean hasColumnfamilyName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bytes columnfamily_name = 3; + */ + public com.google.protobuf.ByteString getColumnfamilyName() { + return columnfamilyName_; + } + + // optional .TableSchema unmodified_table_schema = 4; + public static final int UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public boolean hasUnmodifiedTableSchema() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() { + return unmodifiedTableSchema_; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() { + return unmodifiedTableSchema_; + } + + private void initFields() { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + columnfamilyName_ = com.google.protobuf.ByteString.EMPTY; + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasUserInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasColumnfamilyName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getUserInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (hasUnmodifiedTableSchema()) { + if (!getUnmodifiedTableSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, columnfamilyName_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeMessage(4, unmodifiedTableSchema_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 
0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, columnfamilyName_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, unmodifiedTableSchema_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData) obj; + + boolean result = true; + result = result && (hasUserInfo() == other.hasUserInfo()); + if (hasUserInfo()) { + result = result && getUserInfo() + .equals(other.getUserInfo()); + } + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasColumnfamilyName() == other.hasColumnfamilyName()); + if (hasColumnfamilyName()) { + result = result && getColumnfamilyName() + .equals(other.getColumnfamilyName()); + } + result = result && (hasUnmodifiedTableSchema() == other.hasUnmodifiedTableSchema()); + if (hasUnmodifiedTableSchema()) { + result = result && getUnmodifiedTableSchema() + .equals(other.getUnmodifiedTableSchema()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserInfo()) { + hash = (37 * hash) + USER_INFO_FIELD_NUMBER; + hash = (53 * hash) + getUserInfo().hashCode(); + } + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasColumnfamilyName()) { + hash = (37 * hash) + COLUMNFAMILY_NAME_FIELD_NUMBER; + hash = (53 * hash) + getColumnfamilyName().hashCode(); + } + if (hasUnmodifiedTableSchema()) { + hash = (37 * hash) + UNMODIFIED_TABLE_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getUnmodifiedTableSchema().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseFrom(byte[] data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code DeleteColumnFamilyStateData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteColumnFamilyStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteColumnFamilyStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData.class, 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUserInfoFieldBuilder(); + getTableNameFieldBuilder(); + getUnmodifiedTableSchemaFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + columnfamilyName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } else { + unmodifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DeleteColumnFamilyStateData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (userInfoBuilder_ == null) { + result.userInfo_ = userInfo_; + } else { + result.userInfo_ = userInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.columnfamilyName_ = columnfamilyName_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + 
to_bitField0_ |= 0x00000008; + } + if (unmodifiedTableSchemaBuilder_ == null) { + result.unmodifiedTableSchema_ = unmodifiedTableSchema_; + } else { + result.unmodifiedTableSchema_ = unmodifiedTableSchemaBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData.getDefaultInstance()) return this; + if (other.hasUserInfo()) { + mergeUserInfo(other.getUserInfo()); + } + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (other.hasColumnfamilyName()) { + setColumnfamilyName(other.getColumnfamilyName()); + } + if (other.hasUnmodifiedTableSchema()) { + mergeUnmodifiedTableSchema(other.getUnmodifiedTableSchema()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasUserInfo()) { + + return false; + } + if (!hasTableName()) { + + return false; + } + if (!hasColumnfamilyName()) { + + return false; + } + if (!getUserInfo().isInitialized()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + if (hasUnmodifiedTableSchema()) { + if (!getUnmodifiedTableSchema().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyStateData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .UserInformation user_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + if (userInfoBuilder_ == null) { + return userInfo_; + } else { + return userInfoBuilder_.getMessage(); + } + } + /** + * required .UserInformation user_info = 1; + */ + public Builder 
setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + userInfo_ = value; + onChanged(); + } else { + userInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo( + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) { + if (userInfoBuilder_ == null) { + userInfo_ = builderForValue.build(); + onChanged(); + } else { + userInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) { + userInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial(); + } else { + userInfo_ = value; + } + onChanged(); + } else { + userInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder clearUserInfo() { + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + onChanged(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUserInfoFieldBuilder().getBuilder(); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + if (userInfoBuilder_ != null) { + return userInfoBuilder_.getMessageOrBuilder(); + } else { + return userInfo_; + } + } + /** + * required .UserInformation user_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> + getUserInfoFieldBuilder() { + if (userInfoBuilder_ == null) { + userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>( + userInfo_, + getParentForChildren(), + isClean()); + userInfo_ = null; + } + return userInfoBuilder_; + } + + // required .TableName table_name = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> 
tableNameBuilder_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .TableName table_name = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // required bytes columnfamily_name = 3; + private 
com.google.protobuf.ByteString columnfamilyName_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes columnfamily_name = 3; + */ + public boolean hasColumnfamilyName() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bytes columnfamily_name = 3; + */ + public com.google.protobuf.ByteString getColumnfamilyName() { + return columnfamilyName_; + } + /** + * required bytes columnfamily_name = 3; + */ + public Builder setColumnfamilyName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + columnfamilyName_ = value; + onChanged(); + return this; + } + /** + * required bytes columnfamily_name = 3; + */ + public Builder clearColumnfamilyName() { + bitField0_ = (bitField0_ & ~0x00000004); + columnfamilyName_ = getDefaultInstance().getColumnfamilyName(); + onChanged(); + return this; + } + + // optional .TableSchema unmodified_table_schema = 4; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> unmodifiedTableSchemaBuilder_; + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public boolean hasUnmodifiedTableSchema() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getUnmodifiedTableSchema() { + if (unmodifiedTableSchemaBuilder_ == null) { + return unmodifiedTableSchema_; + } else { + return unmodifiedTableSchemaBuilder_.getMessage(); + } + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder setUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (unmodifiedTableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + unmodifiedTableSchema_ = value; + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder setUnmodifiedTableSchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = builderForValue.build(); + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder mergeUnmodifiedTableSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (unmodifiedTableSchemaBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008) && + unmodifiedTableSchema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { + unmodifiedTableSchema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(unmodifiedTableSchema_).mergeFrom(value).buildPartial(); + } else { + unmodifiedTableSchema_ = value; + } + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.mergeFrom(value); + 
} + bitField0_ |= 0x00000008; + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public Builder clearUnmodifiedTableSchema() { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + onChanged(); + } else { + unmodifiedTableSchemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getUnmodifiedTableSchemaBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getUnmodifiedTableSchemaFieldBuilder().getBuilder(); + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getUnmodifiedTableSchemaOrBuilder() { + if (unmodifiedTableSchemaBuilder_ != null) { + return unmodifiedTableSchemaBuilder_.getMessageOrBuilder(); + } else { + return unmodifiedTableSchema_; + } + } + /** + * optional .TableSchema unmodified_table_schema = 4; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getUnmodifiedTableSchemaFieldBuilder() { + if (unmodifiedTableSchemaBuilder_ == null) { + unmodifiedTableSchemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + unmodifiedTableSchema_, + getParentForChildren(), + isClean()); + unmodifiedTableSchema_ = null; + } + return unmodifiedTableSchemaBuilder_; + } + + // @@protoc_insertion_point(builder_scope:DeleteColumnFamilyStateData) + } + + static { + defaultInstance = new DeleteColumnFamilyStateData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:DeleteColumnFamilyStateData) + } + + public interface EnableTableStateDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .UserInformation user_info = 1; + /** + * required .UserInformation user_info = 1; + */ + boolean hasUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder(); + + // required .TableName table_name = 2; + /** + * required .TableName table_name = 2; + */ + boolean hasTableName(); + /** + * required .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * required .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + + // required bool skip_table_state_check = 3; + /** + * required bool skip_table_state_check = 3; + */ + boolean hasSkipTableStateCheck(); + /** + * required bool skip_table_state_check = 3; + */ + boolean getSkipTableStateCheck(); + } + /** + * Protobuf type {@code EnableTableStateData} + */ + public static final class EnableTableStateData extends + 
com.google.protobuf.GeneratedMessage + implements EnableTableStateDataOrBuilder { + // Use EnableTableStateData.newBuilder() to construct. + private EnableTableStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private EnableTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final EnableTableStateData defaultInstance; + public static EnableTableStateData getDefaultInstance() { + return defaultInstance; + } + + public EnableTableStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private EnableTableStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = userInfo_.toBuilder(); + } + userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(userInfo_); + userInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 24: { + bitField0_ |= 0x00000004; + skipTableStateCheck_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_EnableTableStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_EnableTableStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.class, 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public EnableTableStateData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new EnableTableStateData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .UserInformation user_info = 1; + public static final int USER_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + return userInfo_; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + return userInfo_; + } + + // required .TableName table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + // required bool skip_table_state_check = 3; + public static final int SKIP_TABLE_STATE_CHECK_FIELD_NUMBER = 3; + private boolean skipTableStateCheck_; + /** + * required bool skip_table_state_check = 3; + */ + public boolean hasSkipTableStateCheck() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bool skip_table_state_check = 3; + */ + public boolean getSkipTableStateCheck() { + return skipTableStateCheck_; + } + + private void initFields() { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + skipTableStateCheck_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasUserInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSkipTableStateCheck()) { + memoizedIsInitialized = 0; + return false; + } + if (!getUserInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + 
output.writeMessage(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBool(3, skipTableStateCheck_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(3, skipTableStateCheck_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData) obj; + + boolean result = true; + result = result && (hasUserInfo() == other.hasUserInfo()); + if (hasUserInfo()) { + result = result && getUserInfo() + .equals(other.getUserInfo()); + } + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasSkipTableStateCheck() == other.hasSkipTableStateCheck()); + if (hasSkipTableStateCheck()) { + result = result && (getSkipTableStateCheck() + == other.getSkipTableStateCheck()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserInfo()) { + hash = (37 * hash) + USER_INFO_FIELD_NUMBER; + hash = (53 * hash) + getUserInfo().hashCode(); + } + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasSkipTableStateCheck()) { + hash = (37 * hash) + SKIP_TABLE_STATE_CHECK_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getSkipTableStateCheck()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code EnableTableStateData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_EnableTableStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_EnableTableStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.class, 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUserInfoFieldBuilder(); + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + skipTableStateCheck_ = false; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_EnableTableStateData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (userInfoBuilder_ == null) { + result.userInfo_ = userInfo_; + } else { + result.userInfo_ = userInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.skipTableStateCheck_ = skipTableStateCheck_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData.getDefaultInstance()) return this; + if (other.hasUserInfo()) { + mergeUserInfo(other.getUserInfo()); + } + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (other.hasSkipTableStateCheck()) { + setSkipTableStateCheck(other.getSkipTableStateCheck()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasUserInfo()) { + + return false; + } + if (!hasTableName()) { + + return false; + } + if (!hasSkipTableStateCheck()) { + + return false; + } + if (!getUserInfo().isInitialized()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableStateData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .UserInformation user_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + if (userInfoBuilder_ == null) { + return userInfo_; + } else { + return userInfoBuilder_.getMessage(); + } + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + userInfo_ = value; + onChanged(); + } else { + userInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo( + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) { + if (userInfoBuilder_ == null) { + userInfo_ = builderForValue.build(); + onChanged(); + } else { + userInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 
0x00000001) && + userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) { + userInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial(); + } else { + userInfo_ = value; + } + onChanged(); + } else { + userInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder clearUserInfo() { + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + onChanged(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUserInfoFieldBuilder().getBuilder(); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + if (userInfoBuilder_ != null) { + return userInfoBuilder_.getMessageOrBuilder(); + } else { + return userInfo_; + } + } + /** + * required .UserInformation user_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> + getUserInfoFieldBuilder() { + if (userInfoBuilder_ == null) { + userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>( + userInfo_, + getParentForChildren(), + isClean()); + userInfo_ = null; + } + return userInfoBuilder_; + } + + // required .TableName table_name = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName( + 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .TableName table_name = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // required bool skip_table_state_check = 3; + private boolean skipTableStateCheck_ ; + /** + * required bool skip_table_state_check = 3; + */ + public boolean hasSkipTableStateCheck() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bool skip_table_state_check = 3; + */ + public boolean getSkipTableStateCheck() { + return skipTableStateCheck_; + } + /** + * required bool skip_table_state_check = 3; + */ + public Builder setSkipTableStateCheck(boolean value) { + bitField0_ |= 0x00000004; + skipTableStateCheck_ = value; + onChanged(); + return this; + } + /** + * required bool skip_table_state_check = 3; + */ + public Builder clearSkipTableStateCheck() { + bitField0_ = (bitField0_ & ~0x00000004); + skipTableStateCheck_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:EnableTableStateData) + } + + static { + defaultInstance = new EnableTableStateData(true); + 
defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:EnableTableStateData) + } + + public interface DisableTableStateDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .UserInformation user_info = 1; + /** + * required .UserInformation user_info = 1; + */ + boolean hasUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo(); + /** + * required .UserInformation user_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder(); + + // required .TableName table_name = 2; + /** + * required .TableName table_name = 2; + */ + boolean hasTableName(); + /** + * required .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * required .TableName table_name = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + + // required bool skip_table_state_check = 3; + /** + * required bool skip_table_state_check = 3; + */ + boolean hasSkipTableStateCheck(); + /** + * required bool skip_table_state_check = 3; + */ + boolean getSkipTableStateCheck(); + } + /** + * Protobuf type {@code DisableTableStateData} + */ + public static final class DisableTableStateData extends + com.google.protobuf.GeneratedMessage + implements DisableTableStateDataOrBuilder { + // Use DisableTableStateData.newBuilder() to construct. + private DisableTableStateData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private DisableTableStateData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final DisableTableStateData defaultInstance; + public static DisableTableStateData getDefaultInstance() { + return defaultInstance; + } + + public DisableTableStateData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private DisableTableStateData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = userInfo_.toBuilder(); + } + userInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(userInfo_); + userInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = 
tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 24: { + bitField0_ |= 0x00000004; + skipTableStateCheck_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DisableTableStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DisableTableStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public DisableTableStateData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new DisableTableStateData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .UserInformation user_info = 1; + public static final int USER_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_; + /** + * required .UserInformation user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + return userInfo_; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + return userInfo_; + } + + // required .TableName table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + // required bool skip_table_state_check = 3; + public static final int SKIP_TABLE_STATE_CHECK_FIELD_NUMBER = 3; + private boolean skipTableStateCheck_; + /** + * required bool skip_table_state_check = 3; 
+ */ + public boolean hasSkipTableStateCheck() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bool skip_table_state_check = 3; + */ + public boolean getSkipTableStateCheck() { + return skipTableStateCheck_; + } + + private void initFields() { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + skipTableStateCheck_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasUserInfo()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSkipTableStateCheck()) { + memoizedIsInitialized = 0; + return false; + } + if (!getUserInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBool(3, skipTableStateCheck_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, userInfo_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tableName_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(3, skipTableStateCheck_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData other = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData) obj; + + boolean result = true; + result = result && (hasUserInfo() == other.hasUserInfo()); + if (hasUserInfo()) { + result = result && getUserInfo() + .equals(other.getUserInfo()); + } + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && (hasSkipTableStateCheck() == other.hasSkipTableStateCheck()); + if (hasSkipTableStateCheck()) { + result = result && (getSkipTableStateCheck() + == other.getSkipTableStateCheck()); + } + result = result && + 
getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasUserInfo()) { + hash = (37 * hash) + USER_INFO_FIELD_NUMBER; + hash = (53 * hash) + getUserInfo().hashCode(); + } + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + if (hasSkipTableStateCheck()) { + hash = (37 * hash) + SKIP_TABLE_STATE_CHECK_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getSkipTableStateCheck()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { 
return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code DisableTableStateData} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DisableTableStateData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DisableTableStateData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.class, org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getUserInfoFieldBuilder(); + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + skipTableStateCheck_ = false; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.internal_static_DisableTableStateData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData result = new org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (userInfoBuilder_ == null) { + result.userInfo_ = userInfo_; + } else { + result.userInfo_ = userInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.skipTableStateCheck_ = skipTableStateCheck_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData.getDefaultInstance()) return this; + if (other.hasUserInfo()) { + mergeUserInfo(other.getUserInfo()); + } + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + if (other.hasSkipTableStateCheck()) { + setSkipTableStateCheck(other.getSkipTableStateCheck()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasUserInfo()) { + + return false; + } + if (!hasTableName()) { + + return false; + } + if (!hasSkipTableStateCheck()) { + + return false; + } + if (!getUserInfo().isInitialized()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableStateData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .UserInformation user_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_; + /** + * required .UserInformation 
user_info = 1; + */ + public boolean hasUserInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation getUserInfo() { + if (userInfoBuilder_ == null) { + return userInfo_; + } else { + return userInfoBuilder_.getMessage(); + } + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + userInfo_ = value; + onChanged(); + } else { + userInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder setUserInfo( + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) { + if (userInfoBuilder_ == null) { + userInfo_ = builderForValue.build(); + onChanged(); + } else { + userInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder mergeUserInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation value) { + if (userInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + userInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) { + userInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial(); + } else { + userInfo_ = value; + } + onChanged(); + } else { + userInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public Builder clearUserInfo() { + if (userInfoBuilder_ == null) { + userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); + onChanged(); + } else { + userInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getUserInfoFieldBuilder().getBuilder(); + } + /** + * required .UserInformation user_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() { + if (userInfoBuilder_ != null) { + return userInfoBuilder_.getMessageOrBuilder(); + } else { + return userInfo_; + } + } + /** + * required .UserInformation user_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder> + getUserInfoFieldBuilder() { + if (userInfoBuilder_ == null) { + userInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformationOrBuilder>( + userInfo_, + getParentForChildren(), + isClean()); + userInfo_ = null; + } + return userInfoBuilder_; + } + + // required .TableName table_name = 2; + 
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .TableName table_name = 2; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table_name = 2; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .TableName table_name = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + 
tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // required bool skip_table_state_check = 3; + private boolean skipTableStateCheck_ ; + /** + * required bool skip_table_state_check = 3; + */ + public boolean hasSkipTableStateCheck() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bool skip_table_state_check = 3; + */ + public boolean getSkipTableStateCheck() { + return skipTableStateCheck_; + } + /** + * required bool skip_table_state_check = 3; + */ + public Builder setSkipTableStateCheck(boolean value) { + bitField0_ |= 0x00000004; + skipTableStateCheck_ = value; + onChanged(); + return this; + } + /** + * required bool skip_table_state_check = 3; + */ + public Builder clearSkipTableStateCheck() { + bitField0_ = (bitField0_ & ~0x00000004); + skipTableStateCheck_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:DisableTableStateData) + } + + static { + defaultInstance = new DisableTableStateData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:DisableTableStateData) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_CreateTableStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_CreateTableStateData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ModifyTableStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ModifyTableStateData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_TruncateTableStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_TruncateTableStateData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_DeleteTableStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_DeleteTableStateData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_AddColumnFamilyStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_AddColumnFamilyStateData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ModifyColumnFamilyStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ModifyColumnFamilyStateData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_DeleteColumnFamilyStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_DeleteColumnFamilyStateData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_EnableTableStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_EnableTableStateData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + 
internal_static_DisableTableStateData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_DisableTableStateData_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\025MasterProcedure.proto\032\013HBase.proto\032\tRP" + + "C.proto\"\201\001\n\024CreateTableStateData\022#\n\tuser" + + "_info\030\001 \002(\0132\020.UserInformation\022\"\n\014table_s" + + "chema\030\002 \002(\0132\014.TableSchema\022 \n\013region_info" + + "\030\003 \003(\0132\013.RegionInfo\"\277\001\n\024ModifyTableState" + + "Data\022#\n\tuser_info\030\001 \002(\0132\020.UserInformatio" + + "n\022-\n\027unmodified_table_schema\030\002 \001(\0132\014.Tab" + + "leSchema\022+\n\025modified_table_schema\030\003 \002(\0132" + + "\014.TableSchema\022&\n\036delete_column_family_in" + + "_modify\030\004 \002(\010\"\274\001\n\026TruncateTableStateData", + "\022#\n\tuser_info\030\001 \002(\0132\020.UserInformation\022\027\n" + + "\017preserve_splits\030\002 \002(\010\022\036\n\ntable_name\030\003 \001" + + "(\0132\n.TableName\022\"\n\014table_schema\030\004 \001(\0132\014.T" + + "ableSchema\022 \n\013region_info\030\005 \003(\0132\013.Region" + + "Info\"}\n\024DeleteTableStateData\022#\n\tuser_inf" + + "o\030\001 \002(\0132\020.UserInformation\022\036\n\ntable_name\030" + + "\002 \002(\0132\n.TableName\022 \n\013region_info\030\003 \003(\0132\013" + + ".RegionInfo\"\300\001\n\030AddColumnFamilyStateData" + + "\022#\n\tuser_info\030\001 \002(\0132\020.UserInformation\022\036\n" + + "\ntable_name\030\002 \002(\0132\n.TableName\0220\n\023columnf", + "amily_schema\030\003 \002(\0132\023.ColumnFamilySchema\022" + + "-\n\027unmodified_table_schema\030\004 \001(\0132\014.Table" + + "Schema\"\303\001\n\033ModifyColumnFamilyStateData\022#" + + "\n\tuser_info\030\001 \002(\0132\020.UserInformation\022\036\n\nt" + + "able_name\030\002 \002(\0132\n.TableName\0220\n\023columnfam" + + "ily_schema\030\003 \002(\0132\023.ColumnFamilySchema\022-\n" + + "\027unmodified_table_schema\030\004 \001(\0132\014.TableSc" + + "hema\"\254\001\n\033DeleteColumnFamilyStateData\022#\n\t" + + "user_info\030\001 \002(\0132\020.UserInformation\022\036\n\ntab" + + "le_name\030\002 \002(\0132\n.TableName\022\031\n\021columnfamil", + "y_name\030\003 \002(\014\022-\n\027unmodified_table_schema\030" + + "\004 \001(\0132\014.TableSchema\"{\n\024EnableTableStateD" + + "ata\022#\n\tuser_info\030\001 \002(\0132\020.UserInformation" + + "\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\022\036\n\026skip" + + "_table_state_check\030\003 \002(\010\"|\n\025DisableTable" + + "StateData\022#\n\tuser_info\030\001 \002(\0132\020.UserInfor" + + "mation\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\022\036" + + "\n\026skip_table_state_check\030\003 \002(\010*\330\001\n\020Creat" + + "eTableState\022\036\n\032CREATE_TABLE_PRE_OPERATIO" + + "N\020\001\022 \n\034CREATE_TABLE_WRITE_FS_LAYOUT\020\002\022\034\n", + "\030CREATE_TABLE_ADD_TO_META\020\003\022\037\n\033CREATE_TA" + + "BLE_ASSIGN_REGIONS\020\004\022\"\n\036CREATE_TABLE_UPD" + + "ATE_DESC_CACHE\020\005\022\037\n\033CREATE_TABLE_POST_OP" + + "ERATION\020\006*\207\002\n\020ModifyTableState\022\030\n\024MODIFY" + + "_TABLE_PREPARE\020\001\022\036\n\032MODIFY_TABLE_PRE_OPE" + + "RATION\020\002\022(\n$MODIFY_TABLE_UPDATE_TABLE_DE" + + 
"SCRIPTOR\020\003\022&\n\"MODIFY_TABLE_REMOVE_REPLIC" + + "A_COLUMN\020\004\022!\n\035MODIFY_TABLE_DELETE_FS_LAY" + + "OUT\020\005\022\037\n\033MODIFY_TABLE_POST_OPERATION\020\006\022#" + + "\n\037MODIFY_TABLE_REOPEN_ALL_REGIONS\020\007*\212\002\n\022", + "TruncateTableState\022 \n\034TRUNCATE_TABLE_PRE" + + "_OPERATION\020\001\022#\n\037TRUNCATE_TABLE_REMOVE_FR" + + "OM_META\020\002\022\"\n\036TRUNCATE_TABLE_CLEAR_FS_LAY" + + "OUT\020\003\022#\n\037TRUNCATE_TABLE_CREATE_FS_LAYOUT" + + "\020\004\022\036\n\032TRUNCATE_TABLE_ADD_TO_META\020\005\022!\n\035TR" + + "UNCATE_TABLE_ASSIGN_REGIONS\020\006\022!\n\035TRUNCAT" + + "E_TABLE_POST_OPERATION\020\007*\337\001\n\020DeleteTable" + + "State\022\036\n\032DELETE_TABLE_PRE_OPERATION\020\001\022!\n" + + "\035DELETE_TABLE_REMOVE_FROM_META\020\002\022 \n\034DELE" + + "TE_TABLE_CLEAR_FS_LAYOUT\020\003\022\"\n\036DELETE_TAB", + "LE_UPDATE_DESC_CACHE\020\004\022!\n\035DELETE_TABLE_U" + + "NASSIGN_REGIONS\020\005\022\037\n\033DELETE_TABLE_POST_O" + + "PERATION\020\006*\331\001\n\024AddColumnFamilyState\022\035\n\031A" + + "DD_COLUMN_FAMILY_PREPARE\020\001\022#\n\037ADD_COLUMN" + + "_FAMILY_PRE_OPERATION\020\002\022-\n)ADD_COLUMN_FA" + + "MILY_UPDATE_TABLE_DESCRIPTOR\020\003\022$\n ADD_CO" + + "LUMN_FAMILY_POST_OPERATION\020\004\022(\n$ADD_COLU" + + "MN_FAMILY_REOPEN_ALL_REGIONS\020\005*\353\001\n\027Modif" + + "yColumnFamilyState\022 \n\034MODIFY_COLUMN_FAMI" + + "LY_PREPARE\020\001\022&\n\"MODIFY_COLUMN_FAMILY_PRE", + "_OPERATION\020\002\0220\n,MODIFY_COLUMN_FAMILY_UPD" + + "ATE_TABLE_DESCRIPTOR\020\003\022\'\n#MODIFY_COLUMN_" + + "FAMILY_POST_OPERATION\020\004\022+\n\'MODIFY_COLUMN" + + "_FAMILY_REOPEN_ALL_REGIONS\020\005*\226\002\n\027DeleteC" + + "olumnFamilyState\022 \n\034DELETE_COLUMN_FAMILY" + + "_PREPARE\020\001\022&\n\"DELETE_COLUMN_FAMILY_PRE_O" + + "PERATION\020\002\0220\n,DELETE_COLUMN_FAMILY_UPDAT" + + "E_TABLE_DESCRIPTOR\020\003\022)\n%DELETE_COLUMN_FA" + + "MILY_DELETE_FS_LAYOUT\020\004\022\'\n#DELETE_COLUMN" + + "_FAMILY_POST_OPERATION\020\005\022+\n\'DELETE_COLUM", + "N_FAMILY_REOPEN_ALL_REGIONS\020\006*\350\001\n\020Enable" + + "TableState\022\030\n\024ENABLE_TABLE_PREPARE\020\001\022\036\n\032" + + "ENABLE_TABLE_PRE_OPERATION\020\002\022)\n%ENABLE_T" + + "ABLE_SET_ENABLING_TABLE_STATE\020\003\022$\n ENABL" + + "E_TABLE_MARK_REGIONS_ONLINE\020\004\022(\n$ENABLE_" + + "TABLE_SET_ENABLED_TABLE_STATE\020\005\022\037\n\033ENABL" + + "E_TABLE_POST_OPERATION\020\006*\362\001\n\021DisableTabl" + + "eState\022\031\n\025DISABLE_TABLE_PREPARE\020\001\022\037\n\033DIS" + + "ABLE_TABLE_PRE_OPERATION\020\002\022+\n\'DISABLE_TA" + + "BLE_SET_DISABLING_TABLE_STATE\020\003\022&\n\"DISAB", + "LE_TABLE_MARK_REGIONS_OFFLINE\020\004\022*\n&DISAB" + + "LE_TABLE_SET_DISABLED_TABLE_STATE\020\005\022 \n\034D" + + "ISABLE_TABLE_POST_OPERATION\020\006BK\n*org.apa" + + "che.hadoop.hbase.protobuf.generatedB\025Mas" + + "terProcedureProtosH\001\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_CreateTableStateData_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_CreateTableStateData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_CreateTableStateData_descriptor, + new 
java.lang.String[] { "UserInfo", "TableSchema", "RegionInfo", }); + internal_static_ModifyTableStateData_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_ModifyTableStateData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ModifyTableStateData_descriptor, + new java.lang.String[] { "UserInfo", "UnmodifiedTableSchema", "ModifiedTableSchema", "DeleteColumnFamilyInModify", }); + internal_static_TruncateTableStateData_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_TruncateTableStateData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_TruncateTableStateData_descriptor, + new java.lang.String[] { "UserInfo", "PreserveSplits", "TableName", "TableSchema", "RegionInfo", }); + internal_static_DeleteTableStateData_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_DeleteTableStateData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_DeleteTableStateData_descriptor, + new java.lang.String[] { "UserInfo", "TableName", "RegionInfo", }); + internal_static_AddColumnFamilyStateData_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_AddColumnFamilyStateData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_AddColumnFamilyStateData_descriptor, + new java.lang.String[] { "UserInfo", "TableName", "ColumnfamilySchema", "UnmodifiedTableSchema", }); + internal_static_ModifyColumnFamilyStateData_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_ModifyColumnFamilyStateData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ModifyColumnFamilyStateData_descriptor, + new java.lang.String[] { "UserInfo", "TableName", "ColumnfamilySchema", "UnmodifiedTableSchema", }); + internal_static_DeleteColumnFamilyStateData_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_DeleteColumnFamilyStateData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_DeleteColumnFamilyStateData_descriptor, + new java.lang.String[] { "UserInfo", "TableName", "ColumnfamilyName", "UnmodifiedTableSchema", }); + internal_static_EnableTableStateData_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_EnableTableStateData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_EnableTableStateData_descriptor, + new java.lang.String[] { "UserInfo", "TableName", "SkipTableStateCheck", }); + internal_static_DisableTableStateData_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_DisableTableStateData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_DisableTableStateData_descriptor, + new java.lang.String[] { "UserInfo", "TableName", "SkipTableStateCheck", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index a0213f2e94e..463f82f6494 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -9074,6 +9074,16 @@ public final class MasterProtos { public interface CreateTableResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { + + // optional uint64 proc_id = 1; + /** + * optional uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * optional uint64 proc_id = 1; + */ + long getProcId(); } /** * Protobuf type {@code CreateTableResponse} @@ -9108,6 +9118,7 @@ public final class MasterProtos { com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -9125,6 +9136,11 @@ public final class MasterProtos { } break; } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -9164,7 +9180,25 @@ public final class MasterProtos { return PARSER; } + private int bitField0_; + // optional uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + private void initFields() { + procId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -9178,6 +9212,9 @@ public final class MasterProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } getUnknownFields().writeTo(output); } @@ -9187,6 +9224,10 @@ public final class MasterProtos { if (size != -1) return size; size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -9210,6 +9251,11 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse) obj; boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -9223,6 +9269,10 @@ public final class MasterProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -9332,6 +9382,8 @@ public final class MasterProtos { public Builder clear() { super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -9358,6 +9410,13 @@ public final class MasterProtos { public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse buildPartial() { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + result.bitField0_ = to_bitField0_; onBuilt(); return result; } @@ -9373,6 +9432,9 @@ public final class MasterProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse.getDefaultInstance()) return this; + if (other.hasProcId()) { + setProcId(other.getProcId()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -9398,6 +9460,40 @@ public final class MasterProtos { } return this; } + private int bitField0_; + + // optional uint64 proc_id = 1; + private long procId_ ; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } // @@protoc_insertion_point(builder_scope:CreateTableResponse) } @@ -9973,6 +10069,16 @@ public final class MasterProtos { public interface DeleteTableResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { + + // optional uint64 proc_id = 1; + /** + * optional uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * optional uint64 proc_id = 1; + */ + long getProcId(); } /** * Protobuf type {@code DeleteTableResponse} @@ -10007,6 +10113,7 @@ public final class MasterProtos { com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -10024,6 +10131,11 @@ public final class MasterProtos { } break; } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -10063,7 +10175,25 @@ public final class MasterProtos { return PARSER; } + private int bitField0_; + // optional uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + private void initFields() { + procId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -10077,6 +10207,9 @@ public final class MasterProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } getUnknownFields().writeTo(output); } @@ -10086,6 +10219,10 @@ public final 
class MasterProtos { if (size != -1) return size; size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -10109,6 +10246,11 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse) obj; boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -10122,6 +10264,10 @@ public final class MasterProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -10231,6 +10377,8 @@ public final class MasterProtos { public Builder clear() { super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -10257,6 +10405,13 @@ public final class MasterProtos { public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse buildPartial() { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + result.bitField0_ = to_bitField0_; onBuilt(); return result; } @@ -10272,6 +10427,9 @@ public final class MasterProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse.getDefaultInstance()) return this; + if (other.hasProcId()) { + setProcId(other.getProcId()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -10297,6 +10455,40 @@ public final class MasterProtos { } return this; } + private int bitField0_; + + // optional uint64 proc_id = 1; + private long procId_ ; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } // @@protoc_insertion_point(builder_scope:DeleteTableResponse) } @@ -11861,6 +12053,16 @@ public final class MasterProtos { public interface EnableTableResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { + + // optional uint64 proc_id = 1; + /** + * optional uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * optional uint64 proc_id = 1; + */ + long getProcId(); } /** * Protobuf type {@code EnableTableResponse} @@ -11895,6 +12097,7 @@ public final class MasterProtos { com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
throws com.google.protobuf.InvalidProtocolBufferException { initFields(); + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -11912,6 +12115,11 @@ public final class MasterProtos { } break; } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -11951,7 +12159,25 @@ public final class MasterProtos { return PARSER; } + private int bitField0_; + // optional uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + private void initFields() { + procId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -11965,6 +12191,9 @@ public final class MasterProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } getUnknownFields().writeTo(output); } @@ -11974,6 +12203,10 @@ public final class MasterProtos { if (size != -1) return size; size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -11997,6 +12230,11 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse) obj; boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -12010,6 +12248,10 @@ public final class MasterProtos { } int hash = 41; hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -12119,6 +12361,8 @@ public final class MasterProtos { public Builder clear() { super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -12145,6 +12389,13 @@ public final class MasterProtos { public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse buildPartial() { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + result.bitField0_ = to_bitField0_; onBuilt(); return result; } @@ -12160,6 +12411,9 @@ public final class MasterProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse.getDefaultInstance()) return this; + if (other.hasProcId()) { + setProcId(other.getProcId()); + } 
this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -12185,6 +12439,40 @@ public final class MasterProtos { } return this; } + private int bitField0_; + + // optional uint64 proc_id = 1; + private long procId_ ; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } // @@protoc_insertion_point(builder_scope:EnableTableResponse) } @@ -12760,6 +13048,16 @@ public final class MasterProtos { public interface DisableTableResponseOrBuilder extends com.google.protobuf.MessageOrBuilder { + + // optional uint64 proc_id = 1; + /** + * optional uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * optional uint64 proc_id = 1; + */ + long getProcId(); } /** * Protobuf type {@code DisableTableResponse} @@ -12794,6 +13092,7 @@ public final class MasterProtos { com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { initFields(); + int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -12811,6 +13110,11 @@ public final class MasterProtos { } break; } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -12850,7 +13154,25 @@ public final class MasterProtos { return PARSER; } + private int bitField0_; + // optional uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + private void initFields() { + procId_ = 0L; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -12864,6 +13186,9 @@ public final class MasterProtos { public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } getUnknownFields().writeTo(output); } @@ -12873,6 +13198,10 @@ public final class MasterProtos { if (size != -1) return size; size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -12896,6 +13225,11 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse) obj; boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -12909,6 +13243,10 @@ public final class MasterProtos { } int hash = 41; hash = (19 * 
hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -13018,6 +13356,8 @@ public final class MasterProtos { public Builder clear() { super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -13044,6 +13384,13 @@ public final class MasterProtos { public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse buildPartial() { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + result.bitField0_ = to_bitField0_; onBuilt(); return result; } @@ -13059,6 +13406,9 @@ public final class MasterProtos { public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse other) { if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse.getDefaultInstance()) return this; + if (other.hasProcId()) { + setProcId(other.getProcId()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -13084,6 +13434,40 @@ public final class MasterProtos { } return this; } + private int bitField0_; + + // optional uint64 proc_id = 1; + private long procId_ ; + /** + * optional uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } // @@protoc_insertion_point(builder_scope:DisableTableResponse) } @@ -24576,6 +24960,786 @@ public final class MasterProtos { // @@protoc_insertion_point(class_scope:SetBalancerRunningResponse) } + public interface IsBalancerEnabledRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code IsBalancerEnabledRequest} + */ + public static final class IsBalancerEnabledRequest extends + com.google.protobuf.GeneratedMessage + implements IsBalancerEnabledRequestOrBuilder { + // Use IsBalancerEnabledRequest.newBuilder() to construct. 
+ private IsBalancerEnabledRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private IsBalancerEnabledRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final IsBalancerEnabledRequest defaultInstance; + public static IsBalancerEnabledRequest getDefaultInstance() { + return defaultInstance; + } + + public IsBalancerEnabledRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private IsBalancerEnabledRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsBalancerEnabledRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsBalancerEnabledRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public IsBalancerEnabledRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new IsBalancerEnabledRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return 
size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code IsBalancerEnabledRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsBalancerEnabledRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsBalancerEnabledRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsBalancerEnabledRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:IsBalancerEnabledRequest) + } + + static { + defaultInstance = new IsBalancerEnabledRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:IsBalancerEnabledRequest) + } + + public interface IsBalancerEnabledResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool enabled = 1; + /** + * required bool enabled = 1; + */ + boolean hasEnabled(); + /** + * required bool enabled = 1; + */ + boolean getEnabled(); + } + /** + * Protobuf type {@code IsBalancerEnabledResponse} + */ + public static final class IsBalancerEnabledResponse extends + com.google.protobuf.GeneratedMessage + implements IsBalancerEnabledResponseOrBuilder { + // Use IsBalancerEnabledResponse.newBuilder() to construct. 
+ private IsBalancerEnabledResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private IsBalancerEnabledResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final IsBalancerEnabledResponse defaultInstance; + public static IsBalancerEnabledResponse getDefaultInstance() { + return defaultInstance; + } + + public IsBalancerEnabledResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private IsBalancerEnabledResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + enabled_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsBalancerEnabledResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsBalancerEnabledResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public IsBalancerEnabledResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new IsBalancerEnabledResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bool enabled = 1; + public static final int ENABLED_FIELD_NUMBER = 1; + private boolean enabled_; + /** + * required bool enabled = 1; + */ + public boolean hasEnabled() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool enabled = 1; + */ + public boolean getEnabled() { + return enabled_; + } + + private void initFields() { + enabled_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if 
(isInitialized != -1) return isInitialized == 1; + + if (!hasEnabled()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, enabled_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, enabled_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse) obj; + + boolean result = true; + result = result && (hasEnabled() == other.hasEnabled()); + if (hasEnabled()) { + result = result && (getEnabled() + == other.getEnabled()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasEnabled()) { + hash = (37 * hash) + ENABLED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getEnabled()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code IsBalancerEnabledResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsBalancerEnabledResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsBalancerEnabledResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + enabled_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public 
com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_IsBalancerEnabledResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.enabled_ = enabled_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance()) return this; + if (other.hasEnabled()) { + setEnabled(other.getEnabled()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasEnabled()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bool enabled = 1; + private boolean enabled_ ; + /** + * required bool enabled = 1; + */ + public boolean hasEnabled() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool enabled = 1; + */ + public boolean getEnabled() { + return enabled_; + } + /** + * required bool enabled = 1; + */ + public Builder setEnabled(boolean value) { + bitField0_ |= 0x00000001; + enabled_ = value; + onChanged(); + return this; + } + /** + * required bool enabled = 1; + */ + public Builder clearEnabled() { + bitField0_ = (bitField0_ & ~0x00000001); + enabled_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:IsBalancerEnabledResponse) + } + + static { + 
defaultInstance = new IsBalancerEnabledResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:IsBalancerEnabledResponse) + } + public interface RunCatalogScanRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { } @@ -42597,6 +43761,1464 @@ public final class MasterProtos { // @@protoc_insertion_point(class_scope:IsProcedureDoneResponse) } + public interface GetProcedureResultRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 proc_id = 1; + /** + * required uint64 proc_id = 1; + */ + boolean hasProcId(); + /** + * required uint64 proc_id = 1; + */ + long getProcId(); + } + /** + * Protobuf type {@code GetProcedureResultRequest} + */ + public static final class GetProcedureResultRequest extends + com.google.protobuf.GeneratedMessage + implements GetProcedureResultRequestOrBuilder { + // Use GetProcedureResultRequest.newBuilder() to construct. + private GetProcedureResultRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetProcedureResultRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetProcedureResultRequest defaultInstance; + public static GetProcedureResultRequest getDefaultInstance() { + return defaultInstance; + } + + public GetProcedureResultRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetProcedureResultRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + procId_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public 
GetProcedureResultRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetProcedureResultRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required uint64 proc_id = 1; + public static final int PROC_ID_FIELD_NUMBER = 1; + private long procId_; + /** + * required uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + + private void initFields() { + procId_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasProcId()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, procId_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, procId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) obj; + + boolean result = true; + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetProcedureResultRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.procId_ = procId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance()) return this; + if (other.hasProcId()) { + setProcId(other.getProcId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasProcId()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = 
(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required uint64 proc_id = 1; + private long procId_ ; + /** + * required uint64 proc_id = 1; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 proc_id = 1; + */ + public long getProcId() { + return procId_; + } + /** + * required uint64 proc_id = 1; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000001; + procId_ = value; + onChanged(); + return this; + } + /** + * required uint64 proc_id = 1; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000001); + procId_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:GetProcedureResultRequest) + } + + static { + defaultInstance = new GetProcedureResultRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetProcedureResultRequest) + } + + public interface GetProcedureResultResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .GetProcedureResultResponse.State state = 1; + /** + * required .GetProcedureResultResponse.State state = 1; + */ + boolean hasState(); + /** + * required .GetProcedureResultResponse.State state = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState(); + + // optional uint64 start_time = 2; + /** + * optional uint64 start_time = 2; + */ + boolean hasStartTime(); + /** + * optional uint64 start_time = 2; + */ + long getStartTime(); + + // optional uint64 last_update = 3; + /** + * optional uint64 last_update = 3; + */ + boolean hasLastUpdate(); + /** + * optional uint64 last_update = 3; + */ + long getLastUpdate(); + + // optional bytes result = 4; + /** + * optional bytes result = 4; + */ + boolean hasResult(); + /** + * optional bytes result = 4; + */ + com.google.protobuf.ByteString getResult(); + + // optional .ForeignExceptionMessage exception = 5; + /** + * optional .ForeignExceptionMessage exception = 5; + */ + boolean hasException(); + /** + * optional .ForeignExceptionMessage exception = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException(); + /** + * optional .ForeignExceptionMessage exception = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder(); + } + /** + * Protobuf type {@code GetProcedureResultResponse} + */ + public static final class GetProcedureResultResponse extends + com.google.protobuf.GeneratedMessage + implements GetProcedureResultResponseOrBuilder { + // Use GetProcedureResultResponse.newBuilder() to construct. 
+ private GetProcedureResultResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetProcedureResultResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetProcedureResultResponse defaultInstance; + public static GetProcedureResultResponse getDefaultInstance() { + return defaultInstance; + } + + public GetProcedureResultResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetProcedureResultResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State value = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + state_ = value; + } + break; + } + case 16: { + bitField0_ |= 0x00000002; + startTime_ = input.readUInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + lastUpdate_ = input.readUInt64(); + break; + } + case 34: { + bitField0_ |= 0x00000008; + result_ = input.readBytes(); + break; + } + case 42: { + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + subBuilder = exception_.toBuilder(); + } + exception_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(exception_); + exception_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000010; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetProcedureResultResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetProcedureResultResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + /** + * Protobuf enum {@code GetProcedureResultResponse.State} + */ + public enum State + implements com.google.protobuf.ProtocolMessageEnum { + /** + * NOT_FOUND = 0; + */ + NOT_FOUND(0, 0), + /** + * RUNNING = 1; + */ + RUNNING(1, 1), + /** + * FINISHED = 2; + */ + FINISHED(2, 2), + ; + + /** + * NOT_FOUND = 0; + */ + public static final int NOT_FOUND_VALUE = 0; + /** + * RUNNING = 1; + */ + public static final int RUNNING_VALUE = 1; + /** + * FINISHED = 2; + */ + public static final int FINISHED_VALUE = 2; + + + public final int getNumber() { return value; } + + public static State valueOf(int value) { + switch (value) { + case 0: return NOT_FOUND; + case 1: return RUNNING; + case 2: return FINISHED; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private State(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:GetProcedureResultResponse.State) + } + + private int bitField0_; + // required .GetProcedureResultResponse.State state = 1; + public static final int STATE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State state_; + /** + * required .GetProcedureResultResponse.State state = 1; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .GetProcedureResultResponse.State state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState() { + return state_; + } + + // optional uint64 start_time = 2; + public static final int START_TIME_FIELD_NUMBER = 2; + private long startTime_; + /** + * optional uint64 start_time = 2; + */ + public boolean hasStartTime() { + return 
((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint64 start_time = 2; + */ + public long getStartTime() { + return startTime_; + } + + // optional uint64 last_update = 3; + public static final int LAST_UPDATE_FIELD_NUMBER = 3; + private long lastUpdate_; + /** + * optional uint64 last_update = 3; + */ + public boolean hasLastUpdate() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 last_update = 3; + */ + public long getLastUpdate() { + return lastUpdate_; + } + + // optional bytes result = 4; + public static final int RESULT_FIELD_NUMBER = 4; + private com.google.protobuf.ByteString result_; + /** + * optional bytes result = 4; + */ + public boolean hasResult() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bytes result = 4; + */ + public com.google.protobuf.ByteString getResult() { + return result_; + } + + // optional .ForeignExceptionMessage exception = 5; + public static final int EXCEPTION_FIELD_NUMBER = 5; + private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_; + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public boolean hasException() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() { + return exception_; + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() { + return exception_; + } + + private void initFields() { + state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; + startTime_ = 0L; + lastUpdate_ = 0L; + result_ = com.google.protobuf.ByteString.EMPTY; + exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasState()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, state_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, startTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, lastUpdate_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(4, result_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeMessage(5, exception_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, state_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, startTime_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + 
.computeUInt64Size(3, lastUpdate_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, result_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, exception_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) obj; + + boolean result = true; + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && (hasStartTime() == other.hasStartTime()); + if (hasStartTime()) { + result = result && (getStartTime() + == other.getStartTime()); + } + result = result && (hasLastUpdate() == other.hasLastUpdate()); + if (hasLastUpdate()) { + result = result && (getLastUpdate() + == other.getLastUpdate()); + } + result = result && (hasResult() == other.hasResult()); + if (hasResult()) { + result = result && getResult() + .equals(other.getResult()); + } + result = result && (hasException() == other.hasException()); + if (hasException()) { + result = result && getException() + .equals(other.getException()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); + } + if (hasStartTime()) { + hash = (37 * hash) + START_TIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTime()); + } + if (hasLastUpdate()) { + hash = (37 * hash) + LAST_UPDATE_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getLastUpdate()); + } + if (hasResult()) { + hash = (37 * hash) + RESULT_FIELD_NUMBER; + hash = (53 * hash) + getResult().hashCode(); + } + if (hasException()) { + hash = (37 * hash) + EXCEPTION_FIELD_NUMBER; + hash = (53 * hash) + getException().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse 
parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetProcedureResultResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.Builder.class); + } + + // Construct using 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getExceptionFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; + bitField0_ = (bitField0_ & ~0x00000001); + startTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + lastUpdate_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + result_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000008); + if (exceptionBuilder_ == null) { + exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + } else { + exceptionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetProcedureResultResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.state_ = state_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.startTime_ = startTime_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.lastUpdate_ = lastUpdate_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.result_ = result_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + if (exceptionBuilder_ == null) { + result.exception_ = exception_; + } else { + result.exception_ = exceptionBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public 
Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()) return this; + if (other.hasState()) { + setState(other.getState()); + } + if (other.hasStartTime()) { + setStartTime(other.getStartTime()); + } + if (other.hasLastUpdate()) { + setLastUpdate(other.getLastUpdate()); + } + if (other.hasResult()) { + setResult(other.getResult()); + } + if (other.hasException()) { + mergeException(other.getException()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasState()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .GetProcedureResultResponse.State state = 1; + private org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; + /** + * required .GetProcedureResultResponse.State state = 1; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .GetProcedureResultResponse.State state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State getState() { + return state_; + } + /** + * required .GetProcedureResultResponse.State state = 1; + */ + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + state_ = value; + onChanged(); + return this; + } + /** + * required .GetProcedureResultResponse.State state = 1; + */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000001); + state_ = org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.State.NOT_FOUND; + onChanged(); + return this; + } + + // optional uint64 start_time = 2; + private long startTime_ ; + /** + * optional uint64 start_time = 2; + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint64 start_time = 2; + */ + public long getStartTime() { + return startTime_; + } + /** + * optional uint64 start_time = 2; + */ + public Builder setStartTime(long value) { + bitField0_ |= 0x00000002; + startTime_ = value; + onChanged(); + return this; + } + /** + * optional uint64 start_time = 2; + */ + public Builder clearStartTime() { + bitField0_ = (bitField0_ & ~0x00000002); + startTime_ = 0L; + onChanged(); + return this; + } + + // optional uint64 last_update = 3; + private long lastUpdate_ ; + /** + * optional uint64 last_update = 3; + */ + public boolean hasLastUpdate() { + return ((bitField0_ & 0x00000004) == 
0x00000004); + } + /** + * optional uint64 last_update = 3; + */ + public long getLastUpdate() { + return lastUpdate_; + } + /** + * optional uint64 last_update = 3; + */ + public Builder setLastUpdate(long value) { + bitField0_ |= 0x00000004; + lastUpdate_ = value; + onChanged(); + return this; + } + /** + * optional uint64 last_update = 3; + */ + public Builder clearLastUpdate() { + bitField0_ = (bitField0_ & ~0x00000004); + lastUpdate_ = 0L; + onChanged(); + return this; + } + + // optional bytes result = 4; + private com.google.protobuf.ByteString result_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes result = 4; + */ + public boolean hasResult() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional bytes result = 4; + */ + public com.google.protobuf.ByteString getResult() { + return result_; + } + /** + * optional bytes result = 4; + */ + public Builder setResult(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + result_ = value; + onChanged(); + return this; + } + /** + * optional bytes result = 4; + */ + public Builder clearResult() { + bitField0_ = (bitField0_ & ~0x00000008); + result_ = getDefaultInstance().getResult(); + onChanged(); + return this; + } + + // optional .ForeignExceptionMessage exception = 5; + private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder> exceptionBuilder_; + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public boolean hasException() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() { + if (exceptionBuilder_ == null) { + return exception_; + } else { + return exceptionBuilder_.getMessage(); + } + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public Builder setException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) { + if (exceptionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + exception_ = value; + onChanged(); + } else { + exceptionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public Builder setException( + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder builderForValue) { + if (exceptionBuilder_ == null) { + exception_ = builderForValue.build(); + onChanged(); + } else { + exceptionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public Builder mergeException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) { + if (exceptionBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010) && + exception_ != 
org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance()) { + exception_ = + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.newBuilder(exception_).mergeFrom(value).buildPartial(); + } else { + exception_ = value; + } + onChanged(); + } else { + exceptionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public Builder clearException() { + if (exceptionBuilder_ == null) { + exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + onChanged(); + } else { + exceptionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder getExceptionBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getExceptionFieldBuilder().getBuilder(); + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() { + if (exceptionBuilder_ != null) { + return exceptionBuilder_.getMessageOrBuilder(); + } else { + return exception_; + } + } + /** + * optional .ForeignExceptionMessage exception = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder> + getExceptionFieldBuilder() { + if (exceptionBuilder_ == null) { + exceptionBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder>( + exception_, + getParentForChildren(), + isClean()); + exception_ = null; + } + return exceptionBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetProcedureResultResponse) + } + + static { + defaultInstance = new GetProcedureResultResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetProcedureResultResponse) + } + public interface SetQuotaRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -46241,6 +48863,19 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc IsBalancerEnabled(.IsBalancerEnabledRequest) returns (.IsBalancerEnabledResponse); + * + *
      +       * <pre>
      +       **
      +       * Query whether the Region Balancer is running.
      +       * </pre>
      + */ + public abstract void isBalancerEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request, + com.google.protobuf.RpcCallback done); + /** * rpc RunCatalogScan(.RunCatalogScanRequest) returns (.RunCatalogScanResponse); * @@ -46542,6 +49177,14 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc getProcedureResult(.GetProcedureResultRequest) returns (.GetProcedureResultResponse); + */ + public abstract void getProcedureResult( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -46731,6 +49374,14 @@ public final class MasterProtos { impl.setBalancerRunning(controller, request, done); } + @java.lang.Override + public void isBalancerEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request, + com.google.protobuf.RpcCallback done) { + impl.isBalancerEnabled(controller, request, done); + } + @java.lang.Override public void runCatalogScan( com.google.protobuf.RpcController controller, @@ -46923,6 +49574,14 @@ public final class MasterProtos { impl.getLastMajorCompactionTimestampForRegion(controller, request, done); } + @java.lang.Override + public void getProcedureResult( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request, + com.google.protobuf.RpcCallback done) { + impl.getProcedureResult(controller, request, done); + } + }; } @@ -46992,53 +49651,57 @@ public final class MasterProtos { case 22: return impl.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request); case 23: - return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request); + return impl.isBalancerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request); case 24: - return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request); + return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request); case 25: - return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request); + return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request); case 26: - return impl.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request); + return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request); case 27: - return impl.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request); + return impl.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request); case 28: - return impl.getCompletedSnapshots(controller, 
(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request); + return impl.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request); case 29: - return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request); + return impl.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request); case 30: - return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request); + return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request); case 31: - return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request); + return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request); case 32: - return impl.isRestoreSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest)request); + return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request); case 33: - return impl.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request); + return impl.isRestoreSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest)request); case 34: - return impl.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request); + return impl.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request); case 35: - return impl.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request); + return impl.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request); case 36: - return impl.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request); + return impl.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request); case 37: - return impl.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request); + return impl.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request); case 38: - return impl.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request); + return impl.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request); case 39: - return impl.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request); + return impl.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request); case 40: - return impl.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request); + return impl.getNamespaceDescriptor(controller, 
(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request); case 41: - return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request); + return impl.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request); case 42: - return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request); + return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request); case 43: - return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request); + return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request); case 44: - return impl.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request); + return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request); case 45: - return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request); + return impl.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request); case 46: + return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request); + case 47: return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request); + case 48: + return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -47100,53 +49763,57 @@ public final class MasterProtos { case 22: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance(); case 23: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance(); case 24: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); case 25: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); case 27: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); case 28: - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); case 29: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); case 30: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); case 31: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); case 32: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); case 33: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest.getDefaultInstance(); case 34: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 35: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 36: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); case 37: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); case 38: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); case 39: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); case 40: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); case 42: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); case 43: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); case 44: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); case 45: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); case 46: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); + case 47: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); + case 48: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -47208,53 +49875,57 @@ public final class MasterProtos { case 22: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(); case 23: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(); case 24: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); case 25: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); case 27: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); case 28: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); case 29: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); case 30: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); case 31: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); case 32: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); case 33: - 
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance(); case 34: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 35: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 36: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); case 37: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); case 38: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); case 39: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); case 40: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); case 42: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); case 43: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); case 44: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); case 45: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); case 46: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + case 47: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + case 48: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -47552,6 +50223,19 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request, 
com.google.protobuf.RpcCallback done); + /** + * rpc IsBalancerEnabled(.IsBalancerEnabledRequest) returns (.IsBalancerEnabledResponse); + * + *
      +     * <pre>
      +     **
      +     * Query whether the Region Balancer is running.
      +     * </pre>
      + */ + public abstract void isBalancerEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request, + com.google.protobuf.RpcCallback done); + /** * rpc RunCatalogScan(.RunCatalogScanRequest) returns (.RunCatalogScanResponse); * @@ -47853,6 +50537,14 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc getProcedureResult(.GetProcedureResultRequest) returns (.GetProcedureResultResponse); + */ + public abstract void getProcedureResult( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -47991,125 +50683,135 @@ public final class MasterProtos { done)); return; case 23: + this.isBalancerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 24: this.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 24: + case 25: this.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 25: + case 26: this.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 26: + case 27: this.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 27: + case 28: this.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 28: + case 29: this.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 29: + case 30: this.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 30: + case 31: this.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 31: + case 32: this.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 32: + case 33: this.isRestoreSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 33: + case 34: this.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 34: + 
case 35: this.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 35: + case 36: this.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 36: + case 37: this.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 37: + case 38: this.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 38: + case 39: this.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 39: + case 40: this.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 40: + case 41: this.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 41: + case 42: this.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 42: + case 43: this.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 43: + case 44: this.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 44: + case 45: this.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 45: + case 46: this.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 46: + case 47: this.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 48: + this.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -48171,53 +50873,57 @@ public final class MasterProtos { case 22: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance(); case 23: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance(); case 24: - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); case 25: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); case 27: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); case 28: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); case 29: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); case 30: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); case 31: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); case 32: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); case 33: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest.getDefaultInstance(); case 34: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 35: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 36: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); case 37: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); case 38: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); case 39: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); case 40: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); case 42: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); case 43: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); case 44: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); case 45: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); case 46: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); + case 47: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); + case 48: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -48279,53 +50985,57 @@ public final class MasterProtos { case 22: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(); case 23: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(); case 24: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); case 25: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); case 27: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); case 28: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); case 29: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); case 30: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); case 31: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); case 32: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); case 33: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance(); case 34: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 35: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 36: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); case 37: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); case 38: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); case 39: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); case 40: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); case 42: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); case 43: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); case 44: - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); case 45: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); case 46: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + case 47: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + case 48: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -48692,12 +51402,27 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance())); } + public void isBalancerEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(23), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance())); + } + public void runCatalogScan( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(23), + getDescriptor().getMethods().get(24), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(), @@ -48712,7 +51437,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(24), + getDescriptor().getMethods().get(25), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(), @@ -48727,7 +51452,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(25), + getDescriptor().getMethods().get(26), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(), @@ -48742,7 +51467,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(26), + getDescriptor().getMethods().get(27), controller, request, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(), @@ -48757,7 +51482,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest request, 
com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(27), + getDescriptor().getMethods().get(28), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(), @@ -48772,7 +51497,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(28), + getDescriptor().getMethods().get(29), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(), @@ -48787,7 +51512,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(29), + getDescriptor().getMethods().get(30), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(), @@ -48802,7 +51527,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(30), + getDescriptor().getMethods().get(31), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(), @@ -48817,7 +51542,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(31), + getDescriptor().getMethods().get(32), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(), @@ -48832,7 +51557,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(32), + getDescriptor().getMethods().get(33), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance(), @@ -48847,7 +51572,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(33), + getDescriptor().getMethods().get(34), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(), @@ -48862,7 +51587,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(34), + getDescriptor().getMethods().get(35), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(), @@ -48877,7 +51602,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(35), + getDescriptor().getMethods().get(36), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(), @@ -48892,7 
+51617,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(36), + getDescriptor().getMethods().get(37), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(), @@ -48907,7 +51632,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(37), + getDescriptor().getMethods().get(38), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(), @@ -48922,7 +51647,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(38), + getDescriptor().getMethods().get(39), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(), @@ -48937,7 +51662,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(39), + getDescriptor().getMethods().get(40), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(), @@ -48952,7 +51677,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(40), + getDescriptor().getMethods().get(41), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(), @@ -48967,7 +51692,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(41), + getDescriptor().getMethods().get(42), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(), @@ -48982,7 +51707,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(42), + getDescriptor().getMethods().get(43), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(), @@ -48997,7 +51722,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(43), + getDescriptor().getMethods().get(44), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(), @@ -49012,7 +51737,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - 
getDescriptor().getMethods().get(44), + getDescriptor().getMethods().get(45), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(), @@ -49027,7 +51752,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(45), + getDescriptor().getMethods().get(46), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(), @@ -49042,7 +51767,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(46), + getDescriptor().getMethods().get(47), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(), @@ -49051,6 +51776,21 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance())); } + + public void getProcedureResult( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(48), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -49174,6 +51914,11 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request) throws com.google.protobuf.ServiceException; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse isBalancerEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request) + throws com.google.protobuf.ServiceException; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse runCatalogScan( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request) @@ -49293,6 +52038,11 @@ public final class MasterProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse getProcedureResult( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -49578,12 +52328,24 @@ public final class MasterProtos { } + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse isBalancerEnabled( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(23), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance()); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse runCatalogScan( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(23), + getDescriptor().getMethods().get(24), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance()); @@ -49595,7 +52357,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(24), + getDescriptor().getMethods().get(25), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance()); @@ -49607,7 +52369,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(25), + getDescriptor().getMethods().get(26), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance()); @@ -49619,7 +52381,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(26), + getDescriptor().getMethods().get(27), controller, request, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance()); @@ -49631,7 +52393,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(27), + getDescriptor().getMethods().get(28), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance()); @@ -49643,7 +52405,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request) throws com.google.protobuf.ServiceException { return 
(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(28), + getDescriptor().getMethods().get(29), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance()); @@ -49655,7 +52417,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(29), + getDescriptor().getMethods().get(30), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance()); @@ -49667,7 +52429,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(30), + getDescriptor().getMethods().get(31), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance()); @@ -49679,7 +52441,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(31), + getDescriptor().getMethods().get(32), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance()); @@ -49691,7 +52453,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(32), + getDescriptor().getMethods().get(33), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse.getDefaultInstance()); @@ -49703,7 +52465,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(33), + getDescriptor().getMethods().get(34), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()); @@ -49715,7 +52477,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(34), + getDescriptor().getMethods().get(35), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()); @@ -49727,7 +52489,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request) 
throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(35), + getDescriptor().getMethods().get(36), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance()); @@ -49739,7 +52501,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(36), + getDescriptor().getMethods().get(37), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance()); @@ -49751,7 +52513,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(37), + getDescriptor().getMethods().get(38), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance()); @@ -49763,7 +52525,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(38), + getDescriptor().getMethods().get(39), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance()); @@ -49775,7 +52537,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(39), + getDescriptor().getMethods().get(40), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance()); @@ -49787,7 +52549,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(40), + getDescriptor().getMethods().get(41), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance()); @@ -49799,7 +52561,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(41), + getDescriptor().getMethods().get(42), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance()); @@ -49811,7 
+52573,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(42), + getDescriptor().getMethods().get(43), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance()); @@ -49823,7 +52585,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(43), + getDescriptor().getMethods().get(44), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()); @@ -49835,7 +52597,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(44), + getDescriptor().getMethods().get(45), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance()); @@ -49847,7 +52609,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(45), + getDescriptor().getMethods().get(46), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); @@ -49859,12 +52621,24 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(46), + getDescriptor().getMethods().get(47), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse getProcedureResult( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(48), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:MasterService) @@ -50120,6 +52894,16 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_SetBalancerRunningResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + 
internal_static_IsBalancerEnabledRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_IsBalancerEnabledRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_IsBalancerEnabledResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_IsBalancerEnabledResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_RunCatalogScanRequest_descriptor; private static @@ -50290,6 +53074,16 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_IsProcedureDoneResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetProcedureResultRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetProcedureResultRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetProcedureResultResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetProcedureResultResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_SetQuotaRequest_descriptor; private static @@ -50325,218 +53119,233 @@ public final class MasterProtos { static { java.lang.String[] descriptorData = { "\n\014Master.proto\032\013HBase.proto\032\014Client.prot" + - "o\032\023ClusterStatus.proto\032\013Quota.proto\"`\n\020A" + - "ddColumnRequest\022\036\n\ntable_name\030\001 \002(\0132\n.Ta" + - "bleName\022,\n\017column_families\030\002 \002(\0132\023.Colum" + - "nFamilySchema\"\023\n\021AddColumnResponse\"J\n\023De" + - "leteColumnRequest\022\036\n\ntable_name\030\001 \002(\0132\n." 
+ - "TableName\022\023\n\013column_name\030\002 \002(\014\"\026\n\024Delete" + - "ColumnResponse\"c\n\023ModifyColumnRequest\022\036\n" + - "\ntable_name\030\001 \002(\0132\n.TableName\022,\n\017column_" + - "families\030\002 \002(\0132\023.ColumnFamilySchema\"\026\n\024M", - "odifyColumnResponse\"\\\n\021MoveRegionRequest" + - "\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\022%\n\020de" + - "st_server_name\030\002 \001(\0132\013.ServerName\"\024\n\022Mov" + - "eRegionResponse\"\200\001\n\035DispatchMergingRegio" + - "nsRequest\022\"\n\010region_a\030\001 \002(\0132\020.RegionSpec" + - "ifier\022\"\n\010region_b\030\002 \002(\0132\020.RegionSpecifie" + - "r\022\027\n\010forcible\030\003 \001(\010:\005false\" \n\036DispatchMe" + - "rgingRegionsResponse\"7\n\023AssignRegionRequ" + - "est\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\"\026\n" + - "\024AssignRegionResponse\"O\n\025UnassignRegionR", - "equest\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier" + - "\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026UnassignRegion" + - "Response\"8\n\024OfflineRegionRequest\022 \n\006regi" + - "on\030\001 \002(\0132\020.RegionSpecifier\"\027\n\025OfflineReg" + - "ionResponse\"L\n\022CreateTableRequest\022\"\n\014tab" + - "le_schema\030\001 \002(\0132\014.TableSchema\022\022\n\nsplit_k" + - "eys\030\002 \003(\014\"\025\n\023CreateTableResponse\"4\n\022Dele" + - "teTableRequest\022\036\n\ntable_name\030\001 \002(\0132\n.Tab" + - "leName\"\025\n\023DeleteTableResponse\"T\n\024Truncat" + - "eTableRequest\022\035\n\ttableName\030\001 \002(\0132\n.Table", - "Name\022\035\n\016preserveSplits\030\002 \001(\010:\005false\"\027\n\025T" + - "runcateTableResponse\"4\n\022EnableTableReque" + - "st\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\"\025\n\023En" + - "ableTableResponse\"5\n\023DisableTableRequest" + - "\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\"\026\n\024Disa" + - "bleTableResponse\"X\n\022ModifyTableRequest\022\036" + - "\n\ntable_name\030\001 \002(\0132\n.TableName\022\"\n\014table_" + - "schema\030\002 \002(\0132\014.TableSchema\"\025\n\023ModifyTabl" + - "eResponse\"K\n\026CreateNamespaceRequest\0221\n\023n" + - "amespaceDescriptor\030\001 \002(\0132\024.NamespaceDesc", - "riptor\"\031\n\027CreateNamespaceResponse\"/\n\026Del" + - "eteNamespaceRequest\022\025\n\rnamespaceName\030\001 \002" + - "(\t\"\031\n\027DeleteNamespaceResponse\"K\n\026ModifyN" + - "amespaceRequest\0221\n\023namespaceDescriptor\030\001" + - " \002(\0132\024.NamespaceDescriptor\"\031\n\027ModifyName" + - "spaceResponse\"6\n\035GetNamespaceDescriptorR" + - "equest\022\025\n\rnamespaceName\030\001 \002(\t\"S\n\036GetName" + - "spaceDescriptorResponse\0221\n\023namespaceDesc" + - "riptor\030\001 \002(\0132\024.NamespaceDescriptor\"!\n\037Li" + - "stNamespaceDescriptorsRequest\"U\n ListNam", - "espaceDescriptorsResponse\0221\n\023namespaceDe" + - "scriptor\030\001 \003(\0132\024.NamespaceDescriptor\"?\n&" + - "ListTableDescriptorsByNamespaceRequest\022\025" + - "\n\rnamespaceName\030\001 \002(\t\"L\n\'ListTableDescri" + - "ptorsByNamespaceResponse\022!\n\013tableSchema\030" + - "\001 \003(\0132\014.TableSchema\"9\n ListTableNamesByN" + - "amespaceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"B" + - "\n!ListTableNamesByNamespaceResponse\022\035\n\tt" + - "ableName\030\001 \003(\0132\n.TableName\"\021\n\017ShutdownRe" + - "quest\"\022\n\020ShutdownResponse\"\023\n\021StopMasterR", - "equest\"\024\n\022StopMasterResponse\"\020\n\016BalanceR" + - 
"equest\"\'\n\017BalanceResponse\022\024\n\014balancer_ra" + - "n\030\001 \002(\010\"<\n\031SetBalancerRunningRequest\022\n\n\002" + - "on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\"8\n\032SetBala" + - "ncerRunningResponse\022\032\n\022prev_balance_valu" + - "e\030\001 \001(\010\"\027\n\025RunCatalogScanRequest\"-\n\026RunC" + - "atalogScanResponse\022\023\n\013scan_result\030\001 \001(\005\"" + - "-\n\033EnableCatalogJanitorRequest\022\016\n\006enable" + - "\030\001 \002(\010\"2\n\034EnableCatalogJanitorResponse\022\022" + - "\n\nprev_value\030\001 \001(\010\" \n\036IsCatalogJanitorEn", - "abledRequest\"0\n\037IsCatalogJanitorEnabledR" + - "esponse\022\r\n\005value\030\001 \002(\010\"9\n\017SnapshotReques" + - "t\022&\n\010snapshot\030\001 \002(\0132\024.SnapshotDescriptio" + - "n\",\n\020SnapshotResponse\022\030\n\020expected_timeou" + - "t\030\001 \002(\003\"\036\n\034GetCompletedSnapshotsRequest\"" + - "H\n\035GetCompletedSnapshotsResponse\022\'\n\tsnap" + - "shots\030\001 \003(\0132\024.SnapshotDescription\"?\n\025Del" + - "eteSnapshotRequest\022&\n\010snapshot\030\001 \002(\0132\024.S" + - "napshotDescription\"\030\n\026DeleteSnapshotResp" + - "onse\"@\n\026RestoreSnapshotRequest\022&\n\010snapsh", - "ot\030\001 \002(\0132\024.SnapshotDescription\"\031\n\027Restor" + - "eSnapshotResponse\"?\n\025IsSnapshotDoneReque" + - "st\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotDescripti" + - "on\"U\n\026IsSnapshotDoneResponse\022\023\n\004done\030\001 \001" + - "(\010:\005false\022&\n\010snapshot\030\002 \001(\0132\024.SnapshotDe" + - "scription\"F\n\034IsRestoreSnapshotDoneReques" + - "t\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotDescriptio" + - "n\"4\n\035IsRestoreSnapshotDoneResponse\022\023\n\004do" + - "ne\030\001 \001(\010:\005false\"=\n\033GetSchemaAlterStatusR" + - "equest\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\"T", - "\n\034GetSchemaAlterStatusResponse\022\035\n\025yet_to" + - "_update_regions\030\001 \001(\r\022\025\n\rtotal_regions\030\002" + - " \001(\r\"\202\001\n\032GetTableDescriptorsRequest\022\037\n\013t" + - "able_names\030\001 \003(\0132\n.TableName\022\r\n\005regex\030\002 " + - "\001(\t\022!\n\022include_sys_tables\030\003 \001(\010:\005false\022\021" + - "\n\tnamespace\030\004 \001(\t\"A\n\033GetTableDescriptors" + - "Response\022\"\n\014table_schema\030\001 \003(\0132\014.TableSc" + - "hema\"[\n\024GetTableNamesRequest\022\r\n\005regex\030\001 " + - "\001(\t\022!\n\022include_sys_tables\030\002 \001(\010:\005false\022\021" + - "\n\tnamespace\030\003 \001(\t\"8\n\025GetTableNamesRespon", - "se\022\037\n\013table_names\030\001 \003(\0132\n.TableName\"6\n\024G" + - "etTableStateRequest\022\036\n\ntable_name\030\001 \002(\0132" + - "\n.TableName\"9\n\025GetTableStateResponse\022 \n\013" + - "table_state\030\001 \002(\0132\013.TableState\"\031\n\027GetClu" + - "sterStatusRequest\"B\n\030GetClusterStatusRes" + - "ponse\022&\n\016cluster_status\030\001 \002(\0132\016.ClusterS" + - "tatus\"\030\n\026IsMasterRunningRequest\"4\n\027IsMas" + - "terRunningResponse\022\031\n\021is_master_running\030" + - "\001 \002(\010\"@\n\024ExecProcedureRequest\022(\n\tprocedu" + - "re\030\001 \002(\0132\025.ProcedureDescription\"F\n\025ExecP", - "rocedureResponse\022\030\n\020expected_timeout\030\001 \001" + - "(\003\022\023\n\013return_data\030\002 \001(\014\"B\n\026IsProcedureDo" + - "neRequest\022(\n\tprocedure\030\001 \001(\0132\025.Procedure" + - "Description\"W\n\027IsProcedureDoneResponse\022\023" + - "\n\004done\030\001 
\001(\010:\005false\022\'\n\010snapshot\030\002 \001(\0132\025." + - "ProcedureDescription\"\273\001\n\017SetQuotaRequest" + - "\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022" + - "\021\n\tnamespace\030\003 \001(\t\022\036\n\ntable_name\030\004 \001(\0132\n" + - ".TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass" + - "_globals\030\006 \001(\010\022\"\n\010throttle\030\007 \001(\0132\020.Throt", - "tleRequest\"\022\n\020SetQuotaResponse\"A\n\037MajorC" + - "ompactionTimestampRequest\022\036\n\ntable_name\030" + - "\001 \002(\0132\n.TableName\"L\n(MajorCompactionTime" + - "stampForRegionRequest\022 \n\006region\030\001 \002(\0132\020." + - "RegionSpecifier\"@\n MajorCompactionTimest" + - "ampResponse\022\034\n\024compaction_timestamp\030\001 \002(" + - "\0032\310\032\n\rMasterService\022S\n\024GetSchemaAlterSta" + - "tus\022\034.GetSchemaAlterStatusRequest\032\035.GetS" + - "chemaAlterStatusResponse\022P\n\023GetTableDesc" + - "riptors\022\033.GetTableDescriptorsRequest\032\034.G", - "etTableDescriptorsResponse\022>\n\rGetTableNa" + - "mes\022\025.GetTableNamesRequest\032\026.GetTableNam" + - "esResponse\022G\n\020GetClusterStatus\022\030.GetClus" + - "terStatusRequest\032\031.GetClusterStatusRespo" + - "nse\022D\n\017IsMasterRunning\022\027.IsMasterRunning" + - "Request\032\030.IsMasterRunningResponse\0222\n\tAdd" + - "Column\022\021.AddColumnRequest\032\022.AddColumnRes" + - "ponse\022;\n\014DeleteColumn\022\024.DeleteColumnRequ" + - "est\032\025.DeleteColumnResponse\022;\n\014ModifyColu" + - "mn\022\024.ModifyColumnRequest\032\025.ModifyColumnR", - "esponse\0225\n\nMoveRegion\022\022.MoveRegionReques" + - "t\032\023.MoveRegionResponse\022Y\n\026DispatchMergin" + - "gRegions\022\036.DispatchMergingRegionsRequest" + - "\032\037.DispatchMergingRegionsResponse\022;\n\014Ass" + - "ignRegion\022\024.AssignRegionRequest\032\025.Assign" + - "RegionResponse\022A\n\016UnassignRegion\022\026.Unass" + - "ignRegionRequest\032\027.UnassignRegionRespons" + - "e\022>\n\rOfflineRegion\022\025.OfflineRegionReques" + - "t\032\026.OfflineRegionResponse\0228\n\013DeleteTable" + - "\022\023.DeleteTableRequest\032\024.DeleteTableRespo", - "nse\022>\n\rtruncateTable\022\025.TruncateTableRequ" + - "est\032\026.TruncateTableResponse\0228\n\013EnableTab" + - "le\022\023.EnableTableRequest\032\024.EnableTableRes" + - "ponse\022;\n\014DisableTable\022\024.DisableTableRequ" + - "est\032\025.DisableTableResponse\0228\n\013ModifyTabl" + - "e\022\023.ModifyTableRequest\032\024.ModifyTableResp" + - "onse\0228\n\013CreateTable\022\023.CreateTableRequest" + - "\032\024.CreateTableResponse\022/\n\010Shutdown\022\020.Shu" + - "tdownRequest\032\021.ShutdownResponse\0225\n\nStopM" + - "aster\022\022.StopMasterRequest\032\023.StopMasterRe", - "sponse\022,\n\007Balance\022\017.BalanceRequest\032\020.Bal" + - "anceResponse\022M\n\022SetBalancerRunning\022\032.Set" + - "BalancerRunningRequest\032\033.SetBalancerRunn" + - "ingResponse\022A\n\016RunCatalogScan\022\026.RunCatal" + - "ogScanRequest\032\027.RunCatalogScanResponse\022S" + - "\n\024EnableCatalogJanitor\022\034.EnableCatalogJa" + - "nitorRequest\032\035.EnableCatalogJanitorRespo" + - "nse\022\\\n\027IsCatalogJanitorEnabled\022\037.IsCatal" + - "ogJanitorEnabledRequest\032 .IsCatalogJanit" + - "orEnabledResponse\022L\n\021ExecMasterService\022\032", - ".CoprocessorServiceRequest\032\033.Coprocessor" + - "ServiceResponse\022/\n\010Snapshot\022\020.SnapshotRe" + - "quest\032\021.SnapshotResponse\022V\n\025GetCompleted" + - 
"Snapshots\022\035.GetCompletedSnapshotsRequest" + - "\032\036.GetCompletedSnapshotsResponse\022A\n\016Dele" + - "teSnapshot\022\026.DeleteSnapshotRequest\032\027.Del" + - "eteSnapshotResponse\022A\n\016IsSnapshotDone\022\026." + - "IsSnapshotDoneRequest\032\027.IsSnapshotDoneRe" + - "sponse\022D\n\017RestoreSnapshot\022\027.RestoreSnaps" + - "hotRequest\032\030.RestoreSnapshotResponse\022V\n\025", - "IsRestoreSnapshotDone\022\035.IsRestoreSnapsho" + - "tDoneRequest\032\036.IsRestoreSnapshotDoneResp" + - "onse\022>\n\rExecProcedure\022\025.ExecProcedureReq" + - "uest\032\026.ExecProcedureResponse\022E\n\024ExecProc" + - "edureWithRet\022\025.ExecProcedureRequest\032\026.Ex" + - "ecProcedureResponse\022D\n\017IsProcedureDone\022\027" + - ".IsProcedureDoneRequest\032\030.IsProcedureDon" + - "eResponse\022D\n\017ModifyNamespace\022\027.ModifyNam" + - "espaceRequest\032\030.ModifyNamespaceResponse\022" + - "D\n\017CreateNamespace\022\027.CreateNamespaceRequ", - "est\032\030.CreateNamespaceResponse\022D\n\017DeleteN" + - "amespace\022\027.DeleteNamespaceRequest\032\030.Dele" + - "teNamespaceResponse\022Y\n\026GetNamespaceDescr" + - "iptor\022\036.GetNamespaceDescriptorRequest\032\037." + - "GetNamespaceDescriptorResponse\022_\n\030ListNa" + - "mespaceDescriptors\022 .ListNamespaceDescri" + - "ptorsRequest\032!.ListNamespaceDescriptorsR" + - "esponse\022t\n\037ListTableDescriptorsByNamespa" + - "ce\022\'.ListTableDescriptorsByNamespaceRequ" + - "est\032(.ListTableDescriptorsByNamespaceRes", - "ponse\022b\n\031ListTableNamesByNamespace\022!.Lis" + - "tTableNamesByNamespaceRequest\032\".ListTabl" + - "eNamesByNamespaceResponse\022>\n\rGetTableSta" + - "te\022\025.GetTableStateRequest\032\026.GetTableStat" + - "eResponse\022/\n\010SetQuota\022\020.SetQuotaRequest\032" + - "\021.SetQuotaResponse\022f\n\037getLastMajorCompac" + - "tionTimestamp\022 .MajorCompactionTimestamp" + - "Request\032!.MajorCompactionTimestampRespon" + - "se\022x\n(getLastMajorCompactionTimestampFor" + - "Region\022).MajorCompactionTimestampForRegi", - "onRequest\032!.MajorCompactionTimestampResp" + - "onseBB\n*org.apache.hadoop.hbase.protobuf" + - ".generatedB\014MasterProtosH\001\210\001\001\240\001\001" + "o\032\023ClusterStatus.proto\032\023ErrorHandling.pr" + + "oto\032\013Quota.proto\"`\n\020AddColumnRequest\022\036\n\n" + + "table_name\030\001 \002(\0132\n.TableName\022,\n\017column_f" + + "amilies\030\002 \002(\0132\023.ColumnFamilySchema\"\023\n\021Ad" + + "dColumnResponse\"J\n\023DeleteColumnRequest\022\036" + + "\n\ntable_name\030\001 \002(\0132\n.TableName\022\023\n\013column" + + "_name\030\002 \002(\014\"\026\n\024DeleteColumnResponse\"c\n\023M" + + "odifyColumnRequest\022\036\n\ntable_name\030\001 \002(\0132\n" + + ".TableName\022,\n\017column_families\030\002 \002(\0132\023.Co", + "lumnFamilySchema\"\026\n\024ModifyColumnResponse" + + "\"\\\n\021MoveRegionRequest\022 \n\006region\030\001 \002(\0132\020." 
+ + "RegionSpecifier\022%\n\020dest_server_name\030\002 \001(" + + "\0132\013.ServerName\"\024\n\022MoveRegionResponse\"\200\001\n" + + "\035DispatchMergingRegionsRequest\022\"\n\010region" + + "_a\030\001 \002(\0132\020.RegionSpecifier\022\"\n\010region_b\030\002" + + " \002(\0132\020.RegionSpecifier\022\027\n\010forcible\030\003 \001(\010" + + ":\005false\" \n\036DispatchMergingRegionsRespons" + + "e\"7\n\023AssignRegionRequest\022 \n\006region\030\001 \002(\013" + + "2\020.RegionSpecifier\"\026\n\024AssignRegionRespon", + "se\"O\n\025UnassignRegionRequest\022 \n\006region\030\001 " + + "\002(\0132\020.RegionSpecifier\022\024\n\005force\030\002 \001(\010:\005fa" + + "lse\"\030\n\026UnassignRegionResponse\"8\n\024Offline" + + "RegionRequest\022 \n\006region\030\001 \002(\0132\020.RegionSp" + + "ecifier\"\027\n\025OfflineRegionResponse\"L\n\022Crea" + + "teTableRequest\022\"\n\014table_schema\030\001 \002(\0132\014.T" + + "ableSchema\022\022\n\nsplit_keys\030\002 \003(\014\"&\n\023Create" + + "TableResponse\022\017\n\007proc_id\030\001 \001(\004\"4\n\022Delete" + + "TableRequest\022\036\n\ntable_name\030\001 \002(\0132\n.Table" + + "Name\"&\n\023DeleteTableResponse\022\017\n\007proc_id\030\001", + " \001(\004\"T\n\024TruncateTableRequest\022\035\n\ttableNam" + + "e\030\001 \002(\0132\n.TableName\022\035\n\016preserveSplits\030\002 " + + "\001(\010:\005false\"\027\n\025TruncateTableResponse\"4\n\022E" + + "nableTableRequest\022\036\n\ntable_name\030\001 \002(\0132\n." + + "TableName\"&\n\023EnableTableResponse\022\017\n\007proc" + + "_id\030\001 \001(\004\"5\n\023DisableTableRequest\022\036\n\ntabl" + + "e_name\030\001 \002(\0132\n.TableName\"\'\n\024DisableTable" + + "Response\022\017\n\007proc_id\030\001 \001(\004\"X\n\022ModifyTable" + + "Request\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\022" + + "\"\n\014table_schema\030\002 \002(\0132\014.TableSchema\"\025\n\023M", + "odifyTableResponse\"K\n\026CreateNamespaceReq" + + "uest\0221\n\023namespaceDescriptor\030\001 \002(\0132\024.Name" + + "spaceDescriptor\"\031\n\027CreateNamespaceRespon" + + "se\"/\n\026DeleteNamespaceRequest\022\025\n\rnamespac" + + "eName\030\001 \002(\t\"\031\n\027DeleteNamespaceResponse\"K" + + "\n\026ModifyNamespaceRequest\0221\n\023namespaceDes" + + "criptor\030\001 \002(\0132\024.NamespaceDescriptor\"\031\n\027M" + + "odifyNamespaceResponse\"6\n\035GetNamespaceDe" + + "scriptorRequest\022\025\n\rnamespaceName\030\001 \002(\t\"S" + + "\n\036GetNamespaceDescriptorResponse\0221\n\023name", + "spaceDescriptor\030\001 \002(\0132\024.NamespaceDescrip" + + "tor\"!\n\037ListNamespaceDescriptorsRequest\"U" + + "\n ListNamespaceDescriptorsResponse\0221\n\023na" + + "mespaceDescriptor\030\001 \003(\0132\024.NamespaceDescr" + + "iptor\"?\n&ListTableDescriptorsByNamespace" + + "Request\022\025\n\rnamespaceName\030\001 \002(\t\"L\n\'ListTa" + + "bleDescriptorsByNamespaceResponse\022!\n\013tab" + + "leSchema\030\001 \003(\0132\014.TableSchema\"9\n ListTabl" + + "eNamesByNamespaceRequest\022\025\n\rnamespaceNam" + + "e\030\001 \002(\t\"B\n!ListTableNamesByNamespaceResp", + "onse\022\035\n\ttableName\030\001 \003(\0132\n.TableName\"\021\n\017S" + + "hutdownRequest\"\022\n\020ShutdownResponse\"\023\n\021St" + + "opMasterRequest\"\024\n\022StopMasterResponse\"\020\n" + + "\016BalanceRequest\"\'\n\017BalanceResponse\022\024\n\014ba" + + "lancer_ran\030\001 \002(\010\"<\n\031SetBalancerRunningRe" + + "quest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(\010\"8" + + 
"\n\032SetBalancerRunningResponse\022\032\n\022prev_bal" + + "ance_value\030\001 \001(\010\"\032\n\030IsBalancerEnabledReq" + + "uest\",\n\031IsBalancerEnabledResponse\022\017\n\007ena" + + "bled\030\001 \002(\010\"\027\n\025RunCatalogScanRequest\"-\n\026R", + "unCatalogScanResponse\022\023\n\013scan_result\030\001 \001" + + "(\005\"-\n\033EnableCatalogJanitorRequest\022\016\n\006ena" + + "ble\030\001 \002(\010\"2\n\034EnableCatalogJanitorRespons" + + "e\022\022\n\nprev_value\030\001 \001(\010\" \n\036IsCatalogJanito" + + "rEnabledRequest\"0\n\037IsCatalogJanitorEnabl" + + "edResponse\022\r\n\005value\030\001 \002(\010\"9\n\017SnapshotReq" + + "uest\022&\n\010snapshot\030\001 \002(\0132\024.SnapshotDescrip" + + "tion\",\n\020SnapshotResponse\022\030\n\020expected_tim" + + "eout\030\001 \002(\003\"\036\n\034GetCompletedSnapshotsReque" + + "st\"H\n\035GetCompletedSnapshotsResponse\022\'\n\ts", + "napshots\030\001 \003(\0132\024.SnapshotDescription\"?\n\025" + + "DeleteSnapshotRequest\022&\n\010snapshot\030\001 \002(\0132" + + "\024.SnapshotDescription\"\030\n\026DeleteSnapshotR" + + "esponse\"@\n\026RestoreSnapshotRequest\022&\n\010sna" + + "pshot\030\001 \002(\0132\024.SnapshotDescription\"\031\n\027Res" + + "toreSnapshotResponse\"?\n\025IsSnapshotDoneRe" + + "quest\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotDescri" + + "ption\"U\n\026IsSnapshotDoneResponse\022\023\n\004done\030" + + "\001 \001(\010:\005false\022&\n\010snapshot\030\002 \001(\0132\024.Snapsho" + + "tDescription\"F\n\034IsRestoreSnapshotDoneReq", + "uest\022&\n\010snapshot\030\001 \001(\0132\024.SnapshotDescrip" + + "tion\"4\n\035IsRestoreSnapshotDoneResponse\022\023\n" + + "\004done\030\001 \001(\010:\005false\"=\n\033GetSchemaAlterStat" + + "usRequest\022\036\n\ntable_name\030\001 \002(\0132\n.TableNam" + + "e\"T\n\034GetSchemaAlterStatusResponse\022\035\n\025yet" + + "_to_update_regions\030\001 \001(\r\022\025\n\rtotal_region" + + "s\030\002 \001(\r\"\202\001\n\032GetTableDescriptorsRequest\022\037" + + "\n\013table_names\030\001 \003(\0132\n.TableName\022\r\n\005regex" + + "\030\002 \001(\t\022!\n\022include_sys_tables\030\003 \001(\010:\005fals" + + "e\022\021\n\tnamespace\030\004 \001(\t\"A\n\033GetTableDescript", + "orsResponse\022\"\n\014table_schema\030\001 \003(\0132\014.Tabl" + + "eSchema\"[\n\024GetTableNamesRequest\022\r\n\005regex" + + "\030\001 \001(\t\022!\n\022include_sys_tables\030\002 \001(\010:\005fals" + + "e\022\021\n\tnamespace\030\003 \001(\t\"8\n\025GetTableNamesRes" + + "ponse\022\037\n\013table_names\030\001 \003(\0132\n.TableName\"6" + + "\n\024GetTableStateRequest\022\036\n\ntable_name\030\001 \002" + + "(\0132\n.TableName\"9\n\025GetTableStateResponse\022" + + " \n\013table_state\030\001 \002(\0132\013.TableState\"\031\n\027Get" + + "ClusterStatusRequest\"B\n\030GetClusterStatus" + + "Response\022&\n\016cluster_status\030\001 \002(\0132\016.Clust", + "erStatus\"\030\n\026IsMasterRunningRequest\"4\n\027Is" + + "MasterRunningResponse\022\031\n\021is_master_runni" + + "ng\030\001 \002(\010\"@\n\024ExecProcedureRequest\022(\n\tproc" + + "edure\030\001 \002(\0132\025.ProcedureDescription\"F\n\025Ex" + + "ecProcedureResponse\022\030\n\020expected_timeout\030" + + "\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"B\n\026IsProcedur" + + "eDoneRequest\022(\n\tprocedure\030\001 \001(\0132\025.Proced" + + "ureDescription\"W\n\027IsProcedureDoneRespons" + + "e\022\023\n\004done\030\001 \001(\010:\005false\022\'\n\010snapshot\030\002 \001(\013" + + 
"2\025.ProcedureDescription\",\n\031GetProcedureR", + "esultRequest\022\017\n\007proc_id\030\001 \002(\004\"\347\001\n\032GetPro" + + "cedureResultResponse\0220\n\005state\030\001 \002(\0162!.Ge" + + "tProcedureResultResponse.State\022\022\n\nstart_" + + "time\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006resul" + + "t\030\004 \001(\014\022+\n\texception\030\005 \001(\0132\030.ForeignExce" + + "ptionMessage\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007" + + "RUNNING\020\001\022\014\n\010FINISHED\020\002\"\273\001\n\017SetQuotaRequ" + + "est\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001" + + "(\t\022\021\n\tnamespace\030\003 \001(\t\022\036\n\ntable_name\030\004 \001(" + + "\0132\n.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016byp", + "ass_globals\030\006 \001(\010\022\"\n\010throttle\030\007 \001(\0132\020.Th" + + "rottleRequest\"\022\n\020SetQuotaResponse\"A\n\037Maj" + + "orCompactionTimestampRequest\022\036\n\ntable_na" + + "me\030\001 \002(\0132\n.TableName\"L\n(MajorCompactionT" + + "imestampForRegionRequest\022 \n\006region\030\001 \002(\013" + + "2\020.RegionSpecifier\"@\n MajorCompactionTim" + + "estampResponse\022\034\n\024compaction_timestamp\030\001" + + " \002(\0032\343\033\n\rMasterService\022S\n\024GetSchemaAlter" + + "Status\022\034.GetSchemaAlterStatusRequest\032\035.G" + + "etSchemaAlterStatusResponse\022P\n\023GetTableD", + "escriptors\022\033.GetTableDescriptorsRequest\032" + + "\034.GetTableDescriptorsResponse\022>\n\rGetTabl" + + "eNames\022\025.GetTableNamesRequest\032\026.GetTable" + + "NamesResponse\022G\n\020GetClusterStatus\022\030.GetC" + + "lusterStatusRequest\032\031.GetClusterStatusRe" + + "sponse\022D\n\017IsMasterRunning\022\027.IsMasterRunn" + + "ingRequest\032\030.IsMasterRunningResponse\0222\n\t" + + "AddColumn\022\021.AddColumnRequest\032\022.AddColumn" + + "Response\022;\n\014DeleteColumn\022\024.DeleteColumnR" + + "equest\032\025.DeleteColumnResponse\022;\n\014ModifyC", + "olumn\022\024.ModifyColumnRequest\032\025.ModifyColu" + + "mnResponse\0225\n\nMoveRegion\022\022.MoveRegionReq" + + "uest\032\023.MoveRegionResponse\022Y\n\026DispatchMer" + + "gingRegions\022\036.DispatchMergingRegionsRequ" + + "est\032\037.DispatchMergingRegionsResponse\022;\n\014" + + "AssignRegion\022\024.AssignRegionRequest\032\025.Ass" + + "ignRegionResponse\022A\n\016UnassignRegion\022\026.Un" + + "assignRegionRequest\032\027.UnassignRegionResp" + + "onse\022>\n\rOfflineRegion\022\025.OfflineRegionReq" + + "uest\032\026.OfflineRegionResponse\0228\n\013DeleteTa", + "ble\022\023.DeleteTableRequest\032\024.DeleteTableRe" + + "sponse\022>\n\rtruncateTable\022\025.TruncateTableR" + + "equest\032\026.TruncateTableResponse\0228\n\013Enable" + + "Table\022\023.EnableTableRequest\032\024.EnableTable" + + "Response\022;\n\014DisableTable\022\024.DisableTableR" + + "equest\032\025.DisableTableResponse\0228\n\013ModifyT" + + "able\022\023.ModifyTableRequest\032\024.ModifyTableR" + + "esponse\0228\n\013CreateTable\022\023.CreateTableRequ" + + "est\032\024.CreateTableResponse\022/\n\010Shutdown\022\020." + + "ShutdownRequest\032\021.ShutdownResponse\0225\n\nSt", + "opMaster\022\022.StopMasterRequest\032\023.StopMaste" + + "rResponse\022,\n\007Balance\022\017.BalanceRequest\032\020." + + "BalanceResponse\022M\n\022SetBalancerRunning\022\032." 
+ + "SetBalancerRunningRequest\032\033.SetBalancerR" + + "unningResponse\022J\n\021IsBalancerEnabled\022\031.Is" + + "BalancerEnabledRequest\032\032.IsBalancerEnabl" + + "edResponse\022A\n\016RunCatalogScan\022\026.RunCatalo" + + "gScanRequest\032\027.RunCatalogScanResponse\022S\n" + + "\024EnableCatalogJanitor\022\034.EnableCatalogJan" + + "itorRequest\032\035.EnableCatalogJanitorRespon", + "se\022\\\n\027IsCatalogJanitorEnabled\022\037.IsCatalo" + + "gJanitorEnabledRequest\032 .IsCatalogJanito" + + "rEnabledResponse\022L\n\021ExecMasterService\022\032." + + "CoprocessorServiceRequest\032\033.CoprocessorS" + + "erviceResponse\022/\n\010Snapshot\022\020.SnapshotReq" + + "uest\032\021.SnapshotResponse\022V\n\025GetCompletedS" + + "napshots\022\035.GetCompletedSnapshotsRequest\032" + + "\036.GetCompletedSnapshotsResponse\022A\n\016Delet" + + "eSnapshot\022\026.DeleteSnapshotRequest\032\027.Dele" + + "teSnapshotResponse\022A\n\016IsSnapshotDone\022\026.I", + "sSnapshotDoneRequest\032\027.IsSnapshotDoneRes" + + "ponse\022D\n\017RestoreSnapshot\022\027.RestoreSnapsh" + + "otRequest\032\030.RestoreSnapshotResponse\022V\n\025I" + + "sRestoreSnapshotDone\022\035.IsRestoreSnapshot" + + "DoneRequest\032\036.IsRestoreSnapshotDoneRespo" + + "nse\022>\n\rExecProcedure\022\025.ExecProcedureRequ" + + "est\032\026.ExecProcedureResponse\022E\n\024ExecProce" + + "dureWithRet\022\025.ExecProcedureRequest\032\026.Exe" + + "cProcedureResponse\022D\n\017IsProcedureDone\022\027." + + "IsProcedureDoneRequest\032\030.IsProcedureDone", + "Response\022D\n\017ModifyNamespace\022\027.ModifyName" + + "spaceRequest\032\030.ModifyNamespaceResponse\022D" + + "\n\017CreateNamespace\022\027.CreateNamespaceReque" + + "st\032\030.CreateNamespaceResponse\022D\n\017DeleteNa" + + "mespace\022\027.DeleteNamespaceRequest\032\030.Delet" + + "eNamespaceResponse\022Y\n\026GetNamespaceDescri" + + "ptor\022\036.GetNamespaceDescriptorRequest\032\037.G" + + "etNamespaceDescriptorResponse\022_\n\030ListNam" + + "espaceDescriptors\022 .ListNamespaceDescrip" + + "torsRequest\032!.ListNamespaceDescriptorsRe", + "sponse\022t\n\037ListTableDescriptorsByNamespac" + + "e\022\'.ListTableDescriptorsByNamespaceReque" + + "st\032(.ListTableDescriptorsByNamespaceResp" + + "onse\022b\n\031ListTableNamesByNamespace\022!.List" + + "TableNamesByNamespaceRequest\032\".ListTable" + + "NamesByNamespaceResponse\022>\n\rGetTableStat" + + "e\022\025.GetTableStateRequest\032\026.GetTableState" + + "Response\022/\n\010SetQuota\022\020.SetQuotaRequest\032\021" + + ".SetQuotaResponse\022f\n\037getLastMajorCompact" + + "ionTimestamp\022 .MajorCompactionTimestampR", + "equest\032!.MajorCompactionTimestampRespons" + + "e\022x\n(getLastMajorCompactionTimestampForR" + + "egion\022).MajorCompactionTimestampForRegio" + + "nRequest\032!.MajorCompactionTimestampRespo" + + "nse\022M\n\022getProcedureResult\022\032.GetProcedure" + + "ResultRequest\032\033.GetProcedureResultRespon" + + "seBB\n*org.apache.hadoop.hbase.protobuf.g" + + "eneratedB\014MasterProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -50650,7 +53459,7 @@ public final class MasterProtos { internal_static_CreateTableResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CreateTableResponse_descriptor, - new java.lang.String[] { }); + new java.lang.String[] { "ProcId", }); 
internal_static_DeleteTableRequest_descriptor = getDescriptor().getMessageTypes().get(18); internal_static_DeleteTableRequest_fieldAccessorTable = new @@ -50662,7 +53471,7 @@ public final class MasterProtos { internal_static_DeleteTableResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_DeleteTableResponse_descriptor, - new java.lang.String[] { }); + new java.lang.String[] { "ProcId", }); internal_static_TruncateTableRequest_descriptor = getDescriptor().getMessageTypes().get(20); internal_static_TruncateTableRequest_fieldAccessorTable = new @@ -50686,7 +53495,7 @@ public final class MasterProtos { internal_static_EnableTableResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_EnableTableResponse_descriptor, - new java.lang.String[] { }); + new java.lang.String[] { "ProcId", }); internal_static_DisableTableRequest_descriptor = getDescriptor().getMessageTypes().get(24); internal_static_DisableTableRequest_fieldAccessorTable = new @@ -50698,7 +53507,7 @@ public final class MasterProtos { internal_static_DisableTableResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_DisableTableResponse_descriptor, - new java.lang.String[] { }); + new java.lang.String[] { "ProcId", }); internal_static_ModifyTableRequest_descriptor = getDescriptor().getMessageTypes().get(26); internal_static_ModifyTableRequest_fieldAccessorTable = new @@ -50843,236 +53652,260 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SetBalancerRunningResponse_descriptor, new java.lang.String[] { "PrevBalanceValue", }); - internal_static_RunCatalogScanRequest_descriptor = + internal_static_IsBalancerEnabledRequest_descriptor = getDescriptor().getMessageTypes().get(50); + internal_static_IsBalancerEnabledRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_IsBalancerEnabledRequest_descriptor, + new java.lang.String[] { }); + internal_static_IsBalancerEnabledResponse_descriptor = + getDescriptor().getMessageTypes().get(51); + internal_static_IsBalancerEnabledResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_IsBalancerEnabledResponse_descriptor, + new java.lang.String[] { "Enabled", }); + internal_static_RunCatalogScanRequest_descriptor = + getDescriptor().getMessageTypes().get(52); internal_static_RunCatalogScanRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RunCatalogScanRequest_descriptor, new java.lang.String[] { }); internal_static_RunCatalogScanResponse_descriptor = - getDescriptor().getMessageTypes().get(51); + getDescriptor().getMessageTypes().get(53); internal_static_RunCatalogScanResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RunCatalogScanResponse_descriptor, new java.lang.String[] { "ScanResult", }); internal_static_EnableCatalogJanitorRequest_descriptor = - getDescriptor().getMessageTypes().get(52); + getDescriptor().getMessageTypes().get(54); internal_static_EnableCatalogJanitorRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_EnableCatalogJanitorRequest_descriptor, new java.lang.String[] { "Enable", }); internal_static_EnableCatalogJanitorResponse_descriptor = - getDescriptor().getMessageTypes().get(53); + 
getDescriptor().getMessageTypes().get(55); internal_static_EnableCatalogJanitorResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_EnableCatalogJanitorResponse_descriptor, new java.lang.String[] { "PrevValue", }); internal_static_IsCatalogJanitorEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(54); + getDescriptor().getMessageTypes().get(56); internal_static_IsCatalogJanitorEnabledRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsCatalogJanitorEnabledRequest_descriptor, new java.lang.String[] { }); internal_static_IsCatalogJanitorEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(55); + getDescriptor().getMessageTypes().get(57); internal_static_IsCatalogJanitorEnabledResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsCatalogJanitorEnabledResponse_descriptor, new java.lang.String[] { "Value", }); internal_static_SnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(56); + getDescriptor().getMessageTypes().get(58); internal_static_SnapshotRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SnapshotRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_SnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(57); + getDescriptor().getMessageTypes().get(59); internal_static_SnapshotResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SnapshotResponse_descriptor, new java.lang.String[] { "ExpectedTimeout", }); internal_static_GetCompletedSnapshotsRequest_descriptor = - getDescriptor().getMessageTypes().get(58); + getDescriptor().getMessageTypes().get(60); internal_static_GetCompletedSnapshotsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetCompletedSnapshotsRequest_descriptor, new java.lang.String[] { }); internal_static_GetCompletedSnapshotsResponse_descriptor = - getDescriptor().getMessageTypes().get(59); + getDescriptor().getMessageTypes().get(61); internal_static_GetCompletedSnapshotsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetCompletedSnapshotsResponse_descriptor, new java.lang.String[] { "Snapshots", }); internal_static_DeleteSnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(60); + getDescriptor().getMessageTypes().get(62); internal_static_DeleteSnapshotRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_DeleteSnapshotRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_DeleteSnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(61); + getDescriptor().getMessageTypes().get(63); internal_static_DeleteSnapshotResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_DeleteSnapshotResponse_descriptor, new java.lang.String[] { }); internal_static_RestoreSnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(62); + getDescriptor().getMessageTypes().get(64); internal_static_RestoreSnapshotRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RestoreSnapshotRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_RestoreSnapshotResponse_descriptor = - 
getDescriptor().getMessageTypes().get(63); + getDescriptor().getMessageTypes().get(65); internal_static_RestoreSnapshotResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RestoreSnapshotResponse_descriptor, new java.lang.String[] { }); internal_static_IsSnapshotDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(64); + getDescriptor().getMessageTypes().get(66); internal_static_IsSnapshotDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsSnapshotDoneRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_IsSnapshotDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(65); + getDescriptor().getMessageTypes().get(67); internal_static_IsSnapshotDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsSnapshotDoneResponse_descriptor, new java.lang.String[] { "Done", "Snapshot", }); internal_static_IsRestoreSnapshotDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(66); + getDescriptor().getMessageTypes().get(68); internal_static_IsRestoreSnapshotDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsRestoreSnapshotDoneRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_IsRestoreSnapshotDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(67); + getDescriptor().getMessageTypes().get(69); internal_static_IsRestoreSnapshotDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsRestoreSnapshotDoneResponse_descriptor, new java.lang.String[] { "Done", }); internal_static_GetSchemaAlterStatusRequest_descriptor = - getDescriptor().getMessageTypes().get(68); + getDescriptor().getMessageTypes().get(70); internal_static_GetSchemaAlterStatusRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetSchemaAlterStatusRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_GetSchemaAlterStatusResponse_descriptor = - getDescriptor().getMessageTypes().get(69); + getDescriptor().getMessageTypes().get(71); internal_static_GetSchemaAlterStatusResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetSchemaAlterStatusResponse_descriptor, new java.lang.String[] { "YetToUpdateRegions", "TotalRegions", }); internal_static_GetTableDescriptorsRequest_descriptor = - getDescriptor().getMessageTypes().get(70); + getDescriptor().getMessageTypes().get(72); internal_static_GetTableDescriptorsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetTableDescriptorsRequest_descriptor, new java.lang.String[] { "TableNames", "Regex", "IncludeSysTables", "Namespace", }); internal_static_GetTableDescriptorsResponse_descriptor = - getDescriptor().getMessageTypes().get(71); + getDescriptor().getMessageTypes().get(73); internal_static_GetTableDescriptorsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetTableDescriptorsResponse_descriptor, new java.lang.String[] { "TableSchema", }); internal_static_GetTableNamesRequest_descriptor = - getDescriptor().getMessageTypes().get(72); + getDescriptor().getMessageTypes().get(74); internal_static_GetTableNamesRequest_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetTableNamesRequest_descriptor, new java.lang.String[] { "Regex", "IncludeSysTables", "Namespace", }); internal_static_GetTableNamesResponse_descriptor = - getDescriptor().getMessageTypes().get(73); + getDescriptor().getMessageTypes().get(75); internal_static_GetTableNamesResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetTableNamesResponse_descriptor, new java.lang.String[] { "TableNames", }); internal_static_GetTableStateRequest_descriptor = - getDescriptor().getMessageTypes().get(74); + getDescriptor().getMessageTypes().get(76); internal_static_GetTableStateRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetTableStateRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_GetTableStateResponse_descriptor = - getDescriptor().getMessageTypes().get(75); + getDescriptor().getMessageTypes().get(77); internal_static_GetTableStateResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetTableStateResponse_descriptor, new java.lang.String[] { "TableState", }); internal_static_GetClusterStatusRequest_descriptor = - getDescriptor().getMessageTypes().get(76); + getDescriptor().getMessageTypes().get(78); internal_static_GetClusterStatusRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetClusterStatusRequest_descriptor, new java.lang.String[] { }); internal_static_GetClusterStatusResponse_descriptor = - getDescriptor().getMessageTypes().get(77); + getDescriptor().getMessageTypes().get(79); internal_static_GetClusterStatusResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetClusterStatusResponse_descriptor, new java.lang.String[] { "ClusterStatus", }); internal_static_IsMasterRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(78); + getDescriptor().getMessageTypes().get(80); internal_static_IsMasterRunningRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsMasterRunningRequest_descriptor, new java.lang.String[] { }); internal_static_IsMasterRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(79); + getDescriptor().getMessageTypes().get(81); internal_static_IsMasterRunningResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsMasterRunningResponse_descriptor, new java.lang.String[] { "IsMasterRunning", }); internal_static_ExecProcedureRequest_descriptor = - getDescriptor().getMessageTypes().get(80); + getDescriptor().getMessageTypes().get(82); internal_static_ExecProcedureRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ExecProcedureRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_ExecProcedureResponse_descriptor = - getDescriptor().getMessageTypes().get(81); + getDescriptor().getMessageTypes().get(83); internal_static_ExecProcedureResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ExecProcedureResponse_descriptor, new java.lang.String[] { "ExpectedTimeout", "ReturnData", }); internal_static_IsProcedureDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(82); + getDescriptor().getMessageTypes().get(84); 
internal_static_IsProcedureDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsProcedureDoneRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_IsProcedureDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(83); + getDescriptor().getMessageTypes().get(85); internal_static_IsProcedureDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsProcedureDoneResponse_descriptor, new java.lang.String[] { "Done", "Snapshot", }); + internal_static_GetProcedureResultRequest_descriptor = + getDescriptor().getMessageTypes().get(86); + internal_static_GetProcedureResultRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetProcedureResultRequest_descriptor, + new java.lang.String[] { "ProcId", }); + internal_static_GetProcedureResultResponse_descriptor = + getDescriptor().getMessageTypes().get(87); + internal_static_GetProcedureResultResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetProcedureResultResponse_descriptor, + new java.lang.String[] { "State", "StartTime", "LastUpdate", "Result", "Exception", }); internal_static_SetQuotaRequest_descriptor = - getDescriptor().getMessageTypes().get(84); + getDescriptor().getMessageTypes().get(88); internal_static_SetQuotaRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SetQuotaRequest_descriptor, new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", }); internal_static_SetQuotaResponse_descriptor = - getDescriptor().getMessageTypes().get(85); + getDescriptor().getMessageTypes().get(89); internal_static_SetQuotaResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SetQuotaResponse_descriptor, new java.lang.String[] { }); internal_static_MajorCompactionTimestampRequest_descriptor = - getDescriptor().getMessageTypes().get(86); + getDescriptor().getMessageTypes().get(90); internal_static_MajorCompactionTimestampRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MajorCompactionTimestampRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_MajorCompactionTimestampForRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(87); + getDescriptor().getMessageTypes().get(91); internal_static_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MajorCompactionTimestampForRegionRequest_descriptor, new java.lang.String[] { "Region", }); internal_static_MajorCompactionTimestampResponse_descriptor = - getDescriptor().getMessageTypes().get(88); + getDescriptor().getMessageTypes().get(92); internal_static_MajorCompactionTimestampResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_MajorCompactionTimestampResponse_descriptor, @@ -51086,6 +53919,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ClientProtos.getDescriptor(), org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.getDescriptor(), + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.getDescriptor(), 
org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.getDescriptor(), }, assigner); } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ProcedureProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ProcedureProtos.java new file mode 100644 index 00000000000..3c7dcdba324 --- /dev/null +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ProcedureProtos.java @@ -0,0 +1,7219 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: Procedure.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class ProcedureProtos { + private ProcedureProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + /** + * Protobuf enum {@code ProcedureState} + */ + public enum ProcedureState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * INITIALIZING = 1; + * + *
      +     * Procedure in construction, not yet added to the executor
      +     * 
      + */ + INITIALIZING(0, 1), + /** + * RUNNABLE = 2; + * + *
      +     * Procedure added to the executor, and ready to be executed
      +     * 
      + */ + RUNNABLE(1, 2), + /** + * WAITING = 3; + * + *
      +     * The procedure is waiting on children to be completed
      +     * 
      + */ + WAITING(2, 3), + /** + * WAITING_TIMEOUT = 4; + * + *
      +     * The procedure is waiting on a timeout or an external event
      +     * 
      + */ + WAITING_TIMEOUT(3, 4), + /** + * ROLLEDBACK = 5; + * + *
      +     * The procedure failed and was rolled back
      +     * 
      + */ + ROLLEDBACK(4, 5), + /** + * FINISHED = 6; + * + *
      +     * The procedure execution is completed. It may need a rollback if it failed.
      +     * 
      + */ + FINISHED(5, 6), + ; + + /** + * INITIALIZING = 1; + * + *
      +     * Procedure in construction, not yet added to the executor
      +     * 
      + */ + public static final int INITIALIZING_VALUE = 1; + /** + * RUNNABLE = 2; + * + *
      +     * Procedure added to the executor, and ready to be executed
      +     * 
      + */ + public static final int RUNNABLE_VALUE = 2; + /** + * WAITING = 3; + * + *
      +     * The procedure is waiting on children to be completed
      +     * 
      + */ + public static final int WAITING_VALUE = 3; + /** + * WAITING_TIMEOUT = 4; + * + *
      +     * The procedure is waiting on a timeout or an external event
      +     * 
      + */ + public static final int WAITING_TIMEOUT_VALUE = 4; + /** + * ROLLEDBACK = 5; + * + *
      +     * The procedure failed and was rolled back
      +     * 
      + */ + public static final int ROLLEDBACK_VALUE = 5; + /** + * FINISHED = 6; + * + *
      +     * The procedure execution is completed. It may need a rollback if it failed.
      +     * 
      + */ + public static final int FINISHED_VALUE = 6; + + + public final int getNumber() { return value; } + + public static ProcedureState valueOf(int value) { + switch (value) { + case 1: return INITIALIZING; + case 2: return RUNNABLE; + case 3: return WAITING; + case 4: return WAITING_TIMEOUT; + case 5: return ROLLEDBACK; + case 6: return FINISHED; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ProcedureState findValueByNumber(int number) { + return ProcedureState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.getDescriptor().getEnumTypes().get(0); + } + + private static final ProcedureState[] VALUES = values(); + + public static ProcedureState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private ProcedureState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:ProcedureState) + } + + public interface ProcedureOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string class_name = 1; + /** + * required string class_name = 1; + * + *
      +     * internal "static" state
      +     * 
      + */ + boolean hasClassName(); + /** + * required string class_name = 1; + * + *
      +     * internal "static" state
      +     * 
      + */ + java.lang.String getClassName(); + /** + * required string class_name = 1; + * + *
      +     * internal "static" state
      +     * 
      + */ + com.google.protobuf.ByteString + getClassNameBytes(); + + // optional uint64 parent_id = 2; + /** + * optional uint64 parent_id = 2; + * + *
      +     * parent if not a root-procedure, otherwise not set
      +     * 
      + */ + boolean hasParentId(); + /** + * optional uint64 parent_id = 2; + * + *
      +     * parent if not a root-procedure, otherwise not set
      +     * 
      + */ + long getParentId(); + + // required uint64 proc_id = 3; + /** + * required uint64 proc_id = 3; + */ + boolean hasProcId(); + /** + * required uint64 proc_id = 3; + */ + long getProcId(); + + // required uint64 start_time = 4; + /** + * required uint64 start_time = 4; + */ + boolean hasStartTime(); + /** + * required uint64 start_time = 4; + */ + long getStartTime(); + + // optional string owner = 5; + /** + * optional string owner = 5; + */ + boolean hasOwner(); + /** + * optional string owner = 5; + */ + java.lang.String getOwner(); + /** + * optional string owner = 5; + */ + com.google.protobuf.ByteString + getOwnerBytes(); + + // required .ProcedureState state = 6; + /** + * required .ProcedureState state = 6; + * + *
      +     * internal "runtime" state
      +     * 
      + */ + boolean hasState(); + /** + * required .ProcedureState state = 6; + * + *
      +     * internal "runtime" state
      +     * 
      + */ + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState getState(); + + // repeated uint32 stack_id = 7; + /** + * repeated uint32 stack_id = 7; + * + *
      +     * stack indices in case the procedure was running
      +     * 
      + */ + java.util.List getStackIdList(); + /** + * repeated uint32 stack_id = 7; + * + *
      +     * stack indices in case the procedure was running
      +     * 
      + */ + int getStackIdCount(); + /** + * repeated uint32 stack_id = 7; + * + *
      +     * stack indices in case the procedure was running
      +     * 
      + */ + int getStackId(int index); + + // required uint64 last_update = 8; + /** + * required uint64 last_update = 8; + */ + boolean hasLastUpdate(); + /** + * required uint64 last_update = 8; + */ + long getLastUpdate(); + + // optional uint32 timeout = 9; + /** + * optional uint32 timeout = 9; + */ + boolean hasTimeout(); + /** + * optional uint32 timeout = 9; + */ + int getTimeout(); + + // optional .ForeignExceptionMessage exception = 10; + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
      +     * user state/results
      +     * 
      + */ + boolean hasException(); + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
      +     * user state/results
      +     * 
      + */ + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException(); + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
      +     * user state/results
      +     * 
      + */ + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder(); + + // optional bytes result = 11; + /** + * optional bytes result = 11; + * + *
      +     * opaque (user) result structure
      +     * 
      + */ + boolean hasResult(); + /** + * optional bytes result = 11; + * + *
      +     * opaque (user) result structure
      +     * 
      + */ + com.google.protobuf.ByteString getResult(); + + // optional bytes state_data = 12; + /** + * optional bytes state_data = 12; + * + *
      +     * opaque (user) procedure internal-state
      +     * 
      + */ + boolean hasStateData(); + /** + * optional bytes state_data = 12; + * + *
      +     * opaque (user) procedure internal-state
      +     * 
      + */ + com.google.protobuf.ByteString getStateData(); + } + /** + * Protobuf type {@code Procedure} + * + *
      +   **
      +   * Procedure metadata, serialized by the ProcedureStore to be able to recover the old state.
      +   * 
      + */ + public static final class Procedure extends + com.google.protobuf.GeneratedMessage + implements ProcedureOrBuilder { + // Use Procedure.newBuilder() to construct. + private Procedure(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Procedure(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Procedure defaultInstance; + public static Procedure getDefaultInstance() { + return defaultInstance; + } + + public Procedure getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Procedure( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + className_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + parentId_ = input.readUInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + procId_ = input.readUInt64(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + startTime_ = input.readUInt64(); + break; + } + case 42: { + bitField0_ |= 0x00000010; + owner_ = input.readBytes(); + break; + } + case 48: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState value = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(6, rawValue); + } else { + bitField0_ |= 0x00000020; + state_ = value; + } + break; + } + case 56: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + stackId_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + stackId_.add(input.readUInt32()); + break; + } + case 58: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040) && input.getBytesUntilLimit() > 0) { + stackId_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + while (input.getBytesUntilLimit() > 0) { + stackId_.add(input.readUInt32()); + } + input.popLimit(limit); + break; + } + case 64: { + bitField0_ |= 0x00000040; + lastUpdate_ = input.readUInt64(); + break; + } + case 72: { + bitField0_ |= 0x00000080; + timeout_ = input.readUInt32(); + break; + } + case 82: { + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000100) == 0x00000100)) { + subBuilder = exception_.toBuilder(); + } + exception_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(exception_); + exception_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000100; + break; + } + 
case 90: { + bitField0_ |= 0x00000200; + result_ = input.readBytes(); + break; + } + case 98: { + bitField0_ |= 0x00000400; + stateData_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + stackId_ = java.util.Collections.unmodifiableList(stackId_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_Procedure_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_Procedure_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Procedure parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Procedure(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string class_name = 1; + public static final int CLASS_NAME_FIELD_NUMBER = 1; + private java.lang.Object className_; + /** + * required string class_name = 1; + * + *
      +     * internal "static" state
      +     * 
      + */ + public boolean hasClassName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string class_name = 1; + * + *
      +     * internal "static" state
      +     * 
      + */ + public java.lang.String getClassName() { + java.lang.Object ref = className_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + className_ = s; + } + return s; + } + } + /** + * required string class_name = 1; + * + *
      +     * internal "static" state
      +     * 
      + */ + public com.google.protobuf.ByteString + getClassNameBytes() { + java.lang.Object ref = className_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + className_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional uint64 parent_id = 2; + public static final int PARENT_ID_FIELD_NUMBER = 2; + private long parentId_; + /** + * optional uint64 parent_id = 2; + * + *
      +     * parent if not a root-procedure, otherwise not set
      +     * 
      + */ + public boolean hasParentId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint64 parent_id = 2; + * + *
      +     * parent if not a root-procedure, otherwise not set
      +     * 
      + */ + public long getParentId() { + return parentId_; + } + + // required uint64 proc_id = 3; + public static final int PROC_ID_FIELD_NUMBER = 3; + private long procId_; + /** + * required uint64 proc_id = 3; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint64 proc_id = 3; + */ + public long getProcId() { + return procId_; + } + + // required uint64 start_time = 4; + public static final int START_TIME_FIELD_NUMBER = 4; + private long startTime_; + /** + * required uint64 start_time = 4; + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 start_time = 4; + */ + public long getStartTime() { + return startTime_; + } + + // optional string owner = 5; + public static final int OWNER_FIELD_NUMBER = 5; + private java.lang.Object owner_; + /** + * optional string owner = 5; + */ + public boolean hasOwner() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional string owner = 5; + */ + public java.lang.String getOwner() { + java.lang.Object ref = owner_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + owner_ = s; + } + return s; + } + } + /** + * optional string owner = 5; + */ + public com.google.protobuf.ByteString + getOwnerBytes() { + java.lang.Object ref = owner_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + owner_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .ProcedureState state = 6; + public static final int STATE_FIELD_NUMBER = 6; + private org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState state_; + /** + * required .ProcedureState state = 6; + * + *
      +     * internal "runtime" state
      +     * 
      + */ + public boolean hasState() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * required .ProcedureState state = 6; + * + *
      +     * internal "runtime" state
      +     * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState getState() { + return state_; + } + + // repeated uint32 stack_id = 7; + public static final int STACK_ID_FIELD_NUMBER = 7; + private java.util.List stackId_; + /** + * repeated uint32 stack_id = 7; + * + *
      +     * stack indices in case the procedure was running
      +     * 
      + */ + public java.util.List + getStackIdList() { + return stackId_; + } + /** + * repeated uint32 stack_id = 7; + * + *
      +     * stack indices in case the procedure was running
      +     * 
      + */ + public int getStackIdCount() { + return stackId_.size(); + } + /** + * repeated uint32 stack_id = 7; + * + *
      +     * stack indices in case the procedure was running
      +     * 
      + */ + public int getStackId(int index) { + return stackId_.get(index); + } + + // required uint64 last_update = 8; + public static final int LAST_UPDATE_FIELD_NUMBER = 8; + private long lastUpdate_; + /** + * required uint64 last_update = 8; + */ + public boolean hasLastUpdate() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * required uint64 last_update = 8; + */ + public long getLastUpdate() { + return lastUpdate_; + } + + // optional uint32 timeout = 9; + public static final int TIMEOUT_FIELD_NUMBER = 9; + private int timeout_; + /** + * optional uint32 timeout = 9; + */ + public boolean hasTimeout() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional uint32 timeout = 9; + */ + public int getTimeout() { + return timeout_; + } + + // optional .ForeignExceptionMessage exception = 10; + public static final int EXCEPTION_FIELD_NUMBER = 10; + private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_; + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
      +     * user state/results
      +     * 
      + */ + public boolean hasException() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
      +     * user state/results
      +     * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() { + return exception_; + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
      +     * user state/results
      +     * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() { + return exception_; + } + + // optional bytes result = 11; + public static final int RESULT_FIELD_NUMBER = 11; + private com.google.protobuf.ByteString result_; + /** + * optional bytes result = 11; + * + *
      +     * opaque (user) result structure
      +     * 
      + */ + public boolean hasResult() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional bytes result = 11; + * + *
      +     * opaque (user) result structure
      +     * 
      + */ + public com.google.protobuf.ByteString getResult() { + return result_; + } + + // optional bytes state_data = 12; + public static final int STATE_DATA_FIELD_NUMBER = 12; + private com.google.protobuf.ByteString stateData_; + /** + * optional bytes state_data = 12; + * + *
      +     * opaque (user) procedure internal-state
      +     * 
      + */ + public boolean hasStateData() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional bytes state_data = 12; + * + *
      +     * opaque (user) procedure internal-state
      +     * 
      + */ + public com.google.protobuf.ByteString getStateData() { + return stateData_; + } + + private void initFields() { + className_ = ""; + parentId_ = 0L; + procId_ = 0L; + startTime_ = 0L; + owner_ = ""; + state_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState.INITIALIZING; + stackId_ = java.util.Collections.emptyList(); + lastUpdate_ = 0L; + timeout_ = 0; + exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + result_ = com.google.protobuf.ByteString.EMPTY; + stateData_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasClassName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasProcId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasStartTime()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasState()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasLastUpdate()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getClassNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, parentId_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, procId_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(4, startTime_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBytes(5, getOwnerBytes()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeEnum(6, state_.getNumber()); + } + for (int i = 0; i < stackId_.size(); i++) { + output.writeUInt32(7, stackId_.get(i)); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeUInt64(8, lastUpdate_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeUInt32(9, timeout_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + output.writeMessage(10, exception_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + output.writeBytes(11, result_); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + output.writeBytes(12, stateData_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getClassNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, parentId_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, procId_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(4, startTime_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(5, getOwnerBytes()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(6, state_.getNumber()); + } + { + int dataSize = 0; + for 
(int i = 0; i < stackId_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeUInt32SizeNoTag(stackId_.get(i)); + } + size += dataSize; + size += 1 * getStackIdList().size(); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(8, lastUpdate_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(9, timeout_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(10, exception_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(11, result_); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(12, stateData_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure) obj; + + boolean result = true; + result = result && (hasClassName() == other.hasClassName()); + if (hasClassName()) { + result = result && getClassName() + .equals(other.getClassName()); + } + result = result && (hasParentId() == other.hasParentId()); + if (hasParentId()) { + result = result && (getParentId() + == other.getParentId()); + } + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } + result = result && (hasStartTime() == other.hasStartTime()); + if (hasStartTime()) { + result = result && (getStartTime() + == other.getStartTime()); + } + result = result && (hasOwner() == other.hasOwner()); + if (hasOwner()) { + result = result && getOwner() + .equals(other.getOwner()); + } + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && getStackIdList() + .equals(other.getStackIdList()); + result = result && (hasLastUpdate() == other.hasLastUpdate()); + if (hasLastUpdate()) { + result = result && (getLastUpdate() + == other.getLastUpdate()); + } + result = result && (hasTimeout() == other.hasTimeout()); + if (hasTimeout()) { + result = result && (getTimeout() + == other.getTimeout()); + } + result = result && (hasException() == other.hasException()); + if (hasException()) { + result = result && getException() + .equals(other.getException()); + } + result = result && (hasResult() == other.hasResult()); + if (hasResult()) { + result = result && getResult() + .equals(other.getResult()); + } + result = result && (hasStateData() == other.hasStateData()); + if (hasStateData()) { + result = result && getStateData() + .equals(other.getStateData()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if 
(memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasClassName()) { + hash = (37 * hash) + CLASS_NAME_FIELD_NUMBER; + hash = (53 * hash) + getClassName().hashCode(); + } + if (hasParentId()) { + hash = (37 * hash) + PARENT_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getParentId()); + } + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } + if (hasStartTime()) { + hash = (37 * hash) + START_TIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTime()); + } + if (hasOwner()) { + hash = (37 * hash) + OWNER_FIELD_NUMBER; + hash = (53 * hash) + getOwner().hashCode(); + } + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); + } + if (getStackIdCount() > 0) { + hash = (37 * hash) + STACK_ID_FIELD_NUMBER; + hash = (53 * hash) + getStackIdList().hashCode(); + } + if (hasLastUpdate()) { + hash = (37 * hash) + LAST_UPDATE_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getLastUpdate()); + } + if (hasTimeout()) { + hash = (37 * hash) + TIMEOUT_FIELD_NUMBER; + hash = (53 * hash) + getTimeout(); + } + if (hasException()) { + hash = (37 * hash) + EXCEPTION_FIELD_NUMBER; + hash = (53 * hash) + getException().hashCode(); + } + if (hasResult()) { + hash = (37 * hash) + RESULT_FIELD_NUMBER; + hash = (53 * hash) + getResult().hashCode(); + } + if (hasStateData()) { + hash = (37 * hash) + STATE_DATA_FIELD_NUMBER; + hash = (53 * hash) + getStateData().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseDelimitedFrom( + java.io.InputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code Procedure} + * + *
      +     **
      +     * Procedure metadata, serialized by the ProcedureStore to be able to recover the old state.
      +     * 
      + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_Procedure_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_Procedure_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getExceptionFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + className_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + parentId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + startTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + owner_ = ""; + bitField0_ = (bitField0_ & ~0x00000010); + state_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState.INITIALIZING; + bitField0_ = (bitField0_ & ~0x00000020); + stackId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + lastUpdate_ = 0L; + bitField0_ = (bitField0_ & ~0x00000080); + timeout_ = 0; + bitField0_ = (bitField0_ & ~0x00000100); + if (exceptionBuilder_ == null) { + exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + } else { + exceptionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000200); + result_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000400); + stateData_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000800); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_Procedure_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure build() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure(this); + int 
from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.className_ = className_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.parentId_ = parentId_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.procId_ = procId_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.startTime_ = startTime_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.owner_ = owner_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.state_ = state_; + if (((bitField0_ & 0x00000040) == 0x00000040)) { + stackId_ = java.util.Collections.unmodifiableList(stackId_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.stackId_ = stackId_; + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000040; + } + result.lastUpdate_ = lastUpdate_; + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { + to_bitField0_ |= 0x00000080; + } + result.timeout_ = timeout_; + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { + to_bitField0_ |= 0x00000100; + } + if (exceptionBuilder_ == null) { + result.exception_ = exception_; + } else { + result.exception_ = exceptionBuilder_.build(); + } + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { + to_bitField0_ |= 0x00000200; + } + result.result_ = result_; + if (((from_bitField0_ & 0x00000800) == 0x00000800)) { + to_bitField0_ |= 0x00000400; + } + result.stateData_ = stateData_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()) return this; + if (other.hasClassName()) { + bitField0_ |= 0x00000001; + className_ = other.className_; + onChanged(); + } + if (other.hasParentId()) { + setParentId(other.getParentId()); + } + if (other.hasProcId()) { + setProcId(other.getProcId()); + } + if (other.hasStartTime()) { + setStartTime(other.getStartTime()); + } + if (other.hasOwner()) { + bitField0_ |= 0x00000010; + owner_ = other.owner_; + onChanged(); + } + if (other.hasState()) { + setState(other.getState()); + } + if (!other.stackId_.isEmpty()) { + if (stackId_.isEmpty()) { + stackId_ = other.stackId_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureStackIdIsMutable(); + stackId_.addAll(other.stackId_); + } + onChanged(); + } + if (other.hasLastUpdate()) { + setLastUpdate(other.getLastUpdate()); + } + if (other.hasTimeout()) { + setTimeout(other.getTimeout()); + } + if (other.hasException()) { + mergeException(other.getException()); + } + if (other.hasResult()) { + setResult(other.getResult()); + } + if (other.hasStateData()) { + setStateData(other.getStateData()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasClassName()) { + + return false; + } + if (!hasProcId()) { + + return 
false; + } + if (!hasStartTime()) { + + return false; + } + if (!hasState()) { + + return false; + } + if (!hasLastUpdate()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string class_name = 1; + private java.lang.Object className_ = ""; + /** + * required string class_name = 1; + * + *
      +       * internal "static" state
      +       * 
      + */ + public boolean hasClassName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string class_name = 1; + * + *
      +       * internal "static" state
      +       * 
      + */ + public java.lang.String getClassName() { + java.lang.Object ref = className_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + className_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string class_name = 1; + * + *
      +       * internal "static" state
      +       * 
      + */ + public com.google.protobuf.ByteString + getClassNameBytes() { + java.lang.Object ref = className_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + className_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string class_name = 1; + * + *
      +       * internal "static" state
      +       * 
      + */ + public Builder setClassName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + className_ = value; + onChanged(); + return this; + } + /** + * required string class_name = 1; + * + *
      +       * internal "static" state
      +       * 
      + */ + public Builder clearClassName() { + bitField0_ = (bitField0_ & ~0x00000001); + className_ = getDefaultInstance().getClassName(); + onChanged(); + return this; + } + /** + * required string class_name = 1; + * + *
      +       * internal "static" state
      +       * 
      + */ + public Builder setClassNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + className_ = value; + onChanged(); + return this; + } + + // optional uint64 parent_id = 2; + private long parentId_ ; + /** + * optional uint64 parent_id = 2; + * + *
      +       * parent if not a root-procedure otherwise not set
      +       * 
      + */ + public boolean hasParentId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint64 parent_id = 2; + * + *
      +       * parent if not a root-procedure otherwise not set
      +       * 
      + */ + public long getParentId() { + return parentId_; + } + /** + * optional uint64 parent_id = 2; + * + *
      +       * parent if not a root-procedure otherwise not set
      +       * 
      + */ + public Builder setParentId(long value) { + bitField0_ |= 0x00000002; + parentId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 parent_id = 2; + * + *
      +       * parent if not a root-procedure otherwise not set
      +       * 
      + */ + public Builder clearParentId() { + bitField0_ = (bitField0_ & ~0x00000002); + parentId_ = 0L; + onChanged(); + return this; + } + + // required uint64 proc_id = 3; + private long procId_ ; + /** + * required uint64 proc_id = 3; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint64 proc_id = 3; + */ + public long getProcId() { + return procId_; + } + /** + * required uint64 proc_id = 3; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000004; + procId_ = value; + onChanged(); + return this; + } + /** + * required uint64 proc_id = 3; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000004); + procId_ = 0L; + onChanged(); + return this; + } + + // required uint64 start_time = 4; + private long startTime_ ; + /** + * required uint64 start_time = 4; + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 start_time = 4; + */ + public long getStartTime() { + return startTime_; + } + /** + * required uint64 start_time = 4; + */ + public Builder setStartTime(long value) { + bitField0_ |= 0x00000008; + startTime_ = value; + onChanged(); + return this; + } + /** + * required uint64 start_time = 4; + */ + public Builder clearStartTime() { + bitField0_ = (bitField0_ & ~0x00000008); + startTime_ = 0L; + onChanged(); + return this; + } + + // optional string owner = 5; + private java.lang.Object owner_ = ""; + /** + * optional string owner = 5; + */ + public boolean hasOwner() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional string owner = 5; + */ + public java.lang.String getOwner() { + java.lang.Object ref = owner_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + owner_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string owner = 5; + */ + public com.google.protobuf.ByteString + getOwnerBytes() { + java.lang.Object ref = owner_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + owner_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string owner = 5; + */ + public Builder setOwner( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + owner_ = value; + onChanged(); + return this; + } + /** + * optional string owner = 5; + */ + public Builder clearOwner() { + bitField0_ = (bitField0_ & ~0x00000010); + owner_ = getDefaultInstance().getOwner(); + onChanged(); + return this; + } + /** + * optional string owner = 5; + */ + public Builder setOwnerBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + owner_ = value; + onChanged(); + return this; + } + + // required .ProcedureState state = 6; + private org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState state_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState.INITIALIZING; + /** + * required .ProcedureState state = 6; + * + *
      +       * internal "runtime" state
      +       * 
      + */ + public boolean hasState() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * required .ProcedureState state = 6; + * + *
      +       * internal "runtime" state
      +       * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState getState() { + return state_; + } + /** + * required .ProcedureState state = 6; + * + *
      +       * internal "runtime" state
      +       * 
      + */ + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + state_ = value; + onChanged(); + return this; + } + /** + * required .ProcedureState state = 6; + * + *
      +       * internal "runtime" state
      +       * 
      + */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000020); + state_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState.INITIALIZING; + onChanged(); + return this; + } + + // repeated uint32 stack_id = 7; + private java.util.List stackId_ = java.util.Collections.emptyList(); + private void ensureStackIdIsMutable() { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { + stackId_ = new java.util.ArrayList(stackId_); + bitField0_ |= 0x00000040; + } + } + /** + * repeated uint32 stack_id = 7; + * + *
      +       * stack indices in case the procedure was running
      +       * 
      + */ + public java.util.List + getStackIdList() { + return java.util.Collections.unmodifiableList(stackId_); + } + /** + * repeated uint32 stack_id = 7; + * + *
      +       * stack indices in case the procedure was running
      +       * 
      + */ + public int getStackIdCount() { + return stackId_.size(); + } + /** + * repeated uint32 stack_id = 7; + * + *
      +       * stack indices in case the procedure was running
      +       * 
      + */ + public int getStackId(int index) { + return stackId_.get(index); + } + /** + * repeated uint32 stack_id = 7; + * + *
      +       * stack indices in case the procedure was running
      +       * 
      + */ + public Builder setStackId( + int index, int value) { + ensureStackIdIsMutable(); + stackId_.set(index, value); + onChanged(); + return this; + } + /** + * repeated uint32 stack_id = 7; + * + *
      +       * stack indices in case the procedure was running
      +       * 
      + */ + public Builder addStackId(int value) { + ensureStackIdIsMutable(); + stackId_.add(value); + onChanged(); + return this; + } + /** + * repeated uint32 stack_id = 7; + * + *
      +       * stack indices in case the procedure was running
      +       * 
      + */ + public Builder addAllStackId( + java.lang.Iterable values) { + ensureStackIdIsMutable(); + super.addAll(values, stackId_); + onChanged(); + return this; + } + /** + * repeated uint32 stack_id = 7; + * + *
      +       * stack indices in case the procedure was running
      +       * 
      + */ + public Builder clearStackId() { + stackId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + return this; + } + + // required uint64 last_update = 8; + private long lastUpdate_ ; + /** + * required uint64 last_update = 8; + */ + public boolean hasLastUpdate() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * required uint64 last_update = 8; + */ + public long getLastUpdate() { + return lastUpdate_; + } + /** + * required uint64 last_update = 8; + */ + public Builder setLastUpdate(long value) { + bitField0_ |= 0x00000080; + lastUpdate_ = value; + onChanged(); + return this; + } + /** + * required uint64 last_update = 8; + */ + public Builder clearLastUpdate() { + bitField0_ = (bitField0_ & ~0x00000080); + lastUpdate_ = 0L; + onChanged(); + return this; + } + + // optional uint32 timeout = 9; + private int timeout_ ; + /** + * optional uint32 timeout = 9; + */ + public boolean hasTimeout() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional uint32 timeout = 9; + */ + public int getTimeout() { + return timeout_; + } + /** + * optional uint32 timeout = 9; + */ + public Builder setTimeout(int value) { + bitField0_ |= 0x00000100; + timeout_ = value; + onChanged(); + return this; + } + /** + * optional uint32 timeout = 9; + */ + public Builder clearTimeout() { + bitField0_ = (bitField0_ & ~0x00000100); + timeout_ = 0; + onChanged(); + return this; + } + + // optional .ForeignExceptionMessage exception = 10; + private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder> exceptionBuilder_; + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
      +       * user state/results
      +       * 
      + */ + public boolean hasException() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
      +       * user state/results
      +       * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() { + if (exceptionBuilder_ == null) { + return exception_; + } else { + return exceptionBuilder_.getMessage(); + } + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
      +       * user state/results
      +       * 
      + */ + public Builder setException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) { + if (exceptionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + exception_ = value; + onChanged(); + } else { + exceptionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
      +       * user state/results
      +       * 
      + */ + public Builder setException( + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder builderForValue) { + if (exceptionBuilder_ == null) { + exception_ = builderForValue.build(); + onChanged(); + } else { + exceptionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
      +       * user state/results
      +       * 
      + */ + public Builder mergeException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) { + if (exceptionBuilder_ == null) { + if (((bitField0_ & 0x00000200) == 0x00000200) && + exception_ != org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance()) { + exception_ = + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.newBuilder(exception_).mergeFrom(value).buildPartial(); + } else { + exception_ = value; + } + onChanged(); + } else { + exceptionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
      +       * user state/results
      +       * 
      + */ + public Builder clearException() { + if (exceptionBuilder_ == null) { + exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + onChanged(); + } else { + exceptionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000200); + return this; + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
      +       * user state/results
      +       * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder getExceptionBuilder() { + bitField0_ |= 0x00000200; + onChanged(); + return getExceptionFieldBuilder().getBuilder(); + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
      +       * user state/results
      +       * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() { + if (exceptionBuilder_ != null) { + return exceptionBuilder_.getMessageOrBuilder(); + } else { + return exception_; + } + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
      +       * user state/results
      +       * 
      + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder> + getExceptionFieldBuilder() { + if (exceptionBuilder_ == null) { + exceptionBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder>( + exception_, + getParentForChildren(), + isClean()); + exception_ = null; + } + return exceptionBuilder_; + } + + // optional bytes result = 11; + private com.google.protobuf.ByteString result_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes result = 11; + * + *
      +       * opaque (user) result structure
      +       * 
      + */ + public boolean hasResult() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional bytes result = 11; + * + *
      +       * opaque (user) result structure
      +       * 
      + */ + public com.google.protobuf.ByteString getResult() { + return result_; + } + /** + * optional bytes result = 11; + * + *
      +       * opaque (user) result structure
      +       * 
      + */ + public Builder setResult(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + result_ = value; + onChanged(); + return this; + } + /** + * optional bytes result = 11; + * + *
      +       * opaque (user) result structure
      +       * 
      + */ + public Builder clearResult() { + bitField0_ = (bitField0_ & ~0x00000400); + result_ = getDefaultInstance().getResult(); + onChanged(); + return this; + } + + // optional bytes state_data = 12; + private com.google.protobuf.ByteString stateData_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes state_data = 12; + * + *
      +       * opaque (user) procedure internal-state
      +       * 
      + */ + public boolean hasStateData() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + /** + * optional bytes state_data = 12; + * + *
      +       * opaque (user) procedure internal-state
      +       * 
      + */ + public com.google.protobuf.ByteString getStateData() { + return stateData_; + } + /** + * optional bytes state_data = 12; + * + *
      +       * opaque (user) procedure internal-state
      +       * 
      + */ + public Builder setStateData(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000800; + stateData_ = value; + onChanged(); + return this; + } + /** + * optional bytes state_data = 12; + * + *
      +       * opaque (user) procedure internal-state
      +       * 
      + */ + public Builder clearStateData() { + bitField0_ = (bitField0_ & ~0x00000800); + stateData_ = getDefaultInstance().getStateData(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:Procedure) + } + + static { + defaultInstance = new Procedure(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:Procedure) + } + + public interface SequentialProcedureDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool executed = 1; + /** + * required bool executed = 1; + */ + boolean hasExecuted(); + /** + * required bool executed = 1; + */ + boolean getExecuted(); + } + /** + * Protobuf type {@code SequentialProcedureData} + * + *
      +   **
      +   * SequentialProcedure data
      +   * 
      + */ + public static final class SequentialProcedureData extends + com.google.protobuf.GeneratedMessage + implements SequentialProcedureDataOrBuilder { + // Use SequentialProcedureData.newBuilder() to construct. + private SequentialProcedureData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SequentialProcedureData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SequentialProcedureData defaultInstance; + public static SequentialProcedureData getDefaultInstance() { + return defaultInstance; + } + + public SequentialProcedureData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SequentialProcedureData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + executed_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_SequentialProcedureData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_SequentialProcedureData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SequentialProcedureData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SequentialProcedureData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bool executed = 1; + public static final int EXECUTED_FIELD_NUMBER = 1; + private boolean executed_; + /** + * required bool executed = 1; + */ + public boolean hasExecuted() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool executed = 1; + */ + public boolean getExecuted() { + return 
executed_; + } + + private void initFields() { + executed_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasExecuted()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, executed_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, executed_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData) obj; + + boolean result = true; + result = result && (hasExecuted() == other.hasExecuted()); + if (hasExecuted()) { + result = result && (getExecuted() + == other.getExecuted()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasExecuted()) { + hash = (37 * hash) + EXECUTED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getExecuted()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code SequentialProcedureData} + * + *
      +     **
      +     * SequentialProcedure data
      +     * 
      + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_SequentialProcedureData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_SequentialProcedureData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + executed_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_SequentialProcedureData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData build() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.executed_ = executed_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.getDefaultInstance()) return this; + if (other.hasExecuted()) 
{ + setExecuted(other.getExecuted()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasExecuted()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bool executed = 1; + private boolean executed_ ; + /** + * required bool executed = 1; + */ + public boolean hasExecuted() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool executed = 1; + */ + public boolean getExecuted() { + return executed_; + } + /** + * required bool executed = 1; + */ + public Builder setExecuted(boolean value) { + bitField0_ |= 0x00000001; + executed_ = value; + onChanged(); + return this; + } + /** + * required bool executed = 1; + */ + public Builder clearExecuted() { + bitField0_ = (bitField0_ & ~0x00000001); + executed_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:SequentialProcedureData) + } + + static { + defaultInstance = new SequentialProcedureData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:SequentialProcedureData) + } + + public interface StateMachineProcedureDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated uint32 state = 1; + /** + * repeated uint32 state = 1; + */ + java.util.List getStateList(); + /** + * repeated uint32 state = 1; + */ + int getStateCount(); + /** + * repeated uint32 state = 1; + */ + int getState(int index); + } + /** + * Protobuf type {@code StateMachineProcedureData} + * + *
      +   **
      +   * StateMachineProcedure data
      +   * 
      + */ + public static final class StateMachineProcedureData extends + com.google.protobuf.GeneratedMessage + implements StateMachineProcedureDataOrBuilder { + // Use StateMachineProcedureData.newBuilder() to construct. + private StateMachineProcedureData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private StateMachineProcedureData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final StateMachineProcedureData defaultInstance; + public static StateMachineProcedureData getDefaultInstance() { + return defaultInstance; + } + + public StateMachineProcedureData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private StateMachineProcedureData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + state_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + state_.add(input.readUInt32()); + break; + } + case 10: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) { + state_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + while (input.getBytesUntilLimit() > 0) { + state_.add(input.readUInt32()); + } + input.popLimit(limit); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + state_ = java.util.Collections.unmodifiableList(state_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_StateMachineProcedureData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_StateMachineProcedureData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public StateMachineProcedureData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StateMachineProcedureData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated uint32 state = 1; + public static final int STATE_FIELD_NUMBER = 1; + private java.util.List state_; + /** + * repeated uint32 state = 1; + */ + public java.util.List + getStateList() { + return state_; + } + /** + * repeated uint32 state = 1; + */ + public int getStateCount() { + return state_.size(); + } + /** + * repeated uint32 state = 1; + */ + public int getState(int index) { + return state_.get(index); + } + + private void initFields() { + state_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < state_.size(); i++) { + output.writeUInt32(1, state_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < state_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeUInt32SizeNoTag(state_.get(i)); + } + size += dataSize; + size += 1 * getStateList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData) obj; + + boolean result = true; + result = result && getStateList() + .equals(other.getStateList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getStateCount() > 0) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + getStateList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code StateMachineProcedureData} + * + *
      +     * <pre>
      +     **
      +     * StateMachineProcedure data
      +     * </pre>
      + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_StateMachineProcedureData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_StateMachineProcedureData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + state_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_StateMachineProcedureData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData build() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + state_ = java.util.Collections.unmodifiableList(state_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.state_ = state_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData.getDefaultInstance()) return this; + if (!other.state_.isEmpty()) { + if (state_.isEmpty()) { + state_ = other.state_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureStateIsMutable(); + state_.addAll(other.state_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated uint32 state = 1; + private java.util.List state_ = java.util.Collections.emptyList(); + private void ensureStateIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + state_ = new java.util.ArrayList(state_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated uint32 state = 1; + */ + public java.util.List + getStateList() { + return java.util.Collections.unmodifiableList(state_); + } + /** + * repeated uint32 state = 1; + */ + public int getStateCount() { + return state_.size(); + } + /** + * repeated uint32 state = 1; + */ + public int getState(int index) { + return state_.get(index); + } + /** + * repeated uint32 state = 1; + */ + public Builder setState( + int index, int value) { + ensureStateIsMutable(); + state_.set(index, value); + onChanged(); + return this; + } + /** + * repeated uint32 state = 1; + */ + public Builder addState(int value) { + ensureStateIsMutable(); + state_.add(value); + onChanged(); + return this; + } + /** + * repeated uint32 state = 1; + */ + public Builder addAllState( + java.lang.Iterable values) { + ensureStateIsMutable(); + super.addAll(values, state_); + onChanged(); + return this; + } + /** + * repeated uint32 state = 1; + */ + public Builder clearState() { + state_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:StateMachineProcedureData) + } + + static { + defaultInstance = new StateMachineProcedureData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:StateMachineProcedureData) + } + + public interface ProcedureWALHeaderOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint32 version = 1; + /** + * required uint32 version = 1; + */ + boolean hasVersion(); + /** + * required uint32 version = 1; + */ + int getVersion(); + + // required uint32 type = 2; + /** + * required uint32 type = 2; + */ + boolean hasType(); + /** + * required uint32 type = 2; + */ + int getType(); + + // required uint64 log_id = 3; + /** + * required uint64 log_id = 3; + */ + boolean hasLogId(); + /** + * required uint64 log_id = 3; + */ + long getLogId(); + + // required uint64 min_proc_id = 4; + /** + * required uint64 min_proc_id = 4; + */ + boolean hasMinProcId(); + /** + * required uint64 min_proc_id = 4; + */ + long getMinProcId(); 
+ } + /** + * Protobuf type {@code ProcedureWALHeader} + * + *
      +   * <pre>
      +   **
      +   * Procedure WAL header
      +   * </pre>
      + */ + public static final class ProcedureWALHeader extends + com.google.protobuf.GeneratedMessage + implements ProcedureWALHeaderOrBuilder { + // Use ProcedureWALHeader.newBuilder() to construct. + private ProcedureWALHeader(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ProcedureWALHeader(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ProcedureWALHeader defaultInstance; + public static ProcedureWALHeader getDefaultInstance() { + return defaultInstance; + } + + public ProcedureWALHeader getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ProcedureWALHeader( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + version_ = input.readUInt32(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + type_ = input.readUInt32(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + logId_ = input.readUInt64(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + minProcId_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALHeader_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALHeader_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ProcedureWALHeader parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ProcedureWALHeader(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required uint32 version = 1; + public static final int VERSION_FIELD_NUMBER = 1; + private int version_; + /** + * required uint32 version = 1; 
+ */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint32 version = 1; + */ + public int getVersion() { + return version_; + } + + // required uint32 type = 2; + public static final int TYPE_FIELD_NUMBER = 2; + private int type_; + /** + * required uint32 type = 2; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint32 type = 2; + */ + public int getType() { + return type_; + } + + // required uint64 log_id = 3; + public static final int LOG_ID_FIELD_NUMBER = 3; + private long logId_; + /** + * required uint64 log_id = 3; + */ + public boolean hasLogId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint64 log_id = 3; + */ + public long getLogId() { + return logId_; + } + + // required uint64 min_proc_id = 4; + public static final int MIN_PROC_ID_FIELD_NUMBER = 4; + private long minProcId_; + /** + * required uint64 min_proc_id = 4; + */ + public boolean hasMinProcId() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 min_proc_id = 4; + */ + public long getMinProcId() { + return minProcId_; + } + + private void initFields() { + version_ = 0; + type_ = 0; + logId_ = 0L; + minProcId_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasVersion()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasLogId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasMinProcId()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt32(1, version_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt32(2, type_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, logId_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(4, minProcId_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(1, version_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(2, type_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, logId_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(4, minProcId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader) obj; + + boolean result = true; + result = result && (hasVersion() == other.hasVersion()); + if (hasVersion()) { + result = result && (getVersion() + == other.getVersion()); + } + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && (getType() + == other.getType()); + } + result = result && (hasLogId() == other.hasLogId()); + if (hasLogId()) { + result = result && (getLogId() + == other.getLogId()); + } + result = result && (hasMinProcId() == other.hasMinProcId()); + if (hasMinProcId()) { + result = result && (getMinProcId() + == other.getMinProcId()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasVersion()) { + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion(); + } + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + getType(); + } + if (hasLogId()) { + hash = (37 * hash) + LOG_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getLogId()); + } + if (hasMinProcId()) { + hash = (37 * hash) + MIN_PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getMinProcId()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ProcedureWALHeader} + * + *
      +     * <pre>
      +     **
      +     * Procedure WAL header
      +     * </pre>
      + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeaderOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALHeader_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALHeader_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + version_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + type_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + logId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + minProcId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALHeader_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader build() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.version_ = version_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.type_ = type_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.logId_ = logId_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.minProcId_ = minProcId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader) { 
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader.getDefaultInstance()) return this; + if (other.hasVersion()) { + setVersion(other.getVersion()); + } + if (other.hasType()) { + setType(other.getType()); + } + if (other.hasLogId()) { + setLogId(other.getLogId()); + } + if (other.hasMinProcId()) { + setMinProcId(other.getMinProcId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasVersion()) { + + return false; + } + if (!hasType()) { + + return false; + } + if (!hasLogId()) { + + return false; + } + if (!hasMinProcId()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required uint32 version = 1; + private int version_ ; + /** + * required uint32 version = 1; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint32 version = 1; + */ + public int getVersion() { + return version_; + } + /** + * required uint32 version = 1; + */ + public Builder setVersion(int value) { + bitField0_ |= 0x00000001; + version_ = value; + onChanged(); + return this; + } + /** + * required uint32 version = 1; + */ + public Builder clearVersion() { + bitField0_ = (bitField0_ & ~0x00000001); + version_ = 0; + onChanged(); + return this; + } + + // required uint32 type = 2; + private int type_ ; + /** + * required uint32 type = 2; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint32 type = 2; + */ + public int getType() { + return type_; + } + /** + * required uint32 type = 2; + */ + public Builder setType(int value) { + bitField0_ |= 0x00000002; + type_ = value; + onChanged(); + return this; + } + /** + * required uint32 type = 2; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = 0; + onChanged(); + return this; + } + + // required uint64 log_id = 3; + private long logId_ ; + /** + * required uint64 log_id = 3; + */ + public boolean hasLogId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint64 log_id = 3; + */ + public long getLogId() { + return logId_; + } + /** + * required uint64 log_id = 3; + */ + public Builder setLogId(long value) { + bitField0_ |= 0x00000004; + logId_ = value; + onChanged(); + return this; + } + /** + * required uint64 log_id = 3; + */ + public Builder clearLogId() { + bitField0_ = (bitField0_ & ~0x00000004); + logId_ = 0L; + onChanged(); + return this; + } + + // required uint64 min_proc_id = 4; + private long minProcId_ ; + /** + * 
required uint64 min_proc_id = 4; + */ + public boolean hasMinProcId() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 min_proc_id = 4; + */ + public long getMinProcId() { + return minProcId_; + } + /** + * required uint64 min_proc_id = 4; + */ + public Builder setMinProcId(long value) { + bitField0_ |= 0x00000008; + minProcId_ = value; + onChanged(); + return this; + } + /** + * required uint64 min_proc_id = 4; + */ + public Builder clearMinProcId() { + bitField0_ = (bitField0_ & ~0x00000008); + minProcId_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ProcedureWALHeader) + } + + static { + defaultInstance = new ProcedureWALHeader(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ProcedureWALHeader) + } + + public interface ProcedureWALTrailerOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint32 version = 1; + /** + * required uint32 version = 1; + */ + boolean hasVersion(); + /** + * required uint32 version = 1; + */ + int getVersion(); + + // required uint64 tracker_pos = 2; + /** + * required uint64 tracker_pos = 2; + */ + boolean hasTrackerPos(); + /** + * required uint64 tracker_pos = 2; + */ + long getTrackerPos(); + } + /** + * Protobuf type {@code ProcedureWALTrailer} + * + *
      +   * <pre>
      +   **
      +   * Procedure WAL trailer
      +   * </pre>
      + */ + public static final class ProcedureWALTrailer extends + com.google.protobuf.GeneratedMessage + implements ProcedureWALTrailerOrBuilder { + // Use ProcedureWALTrailer.newBuilder() to construct. + private ProcedureWALTrailer(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ProcedureWALTrailer(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ProcedureWALTrailer defaultInstance; + public static ProcedureWALTrailer getDefaultInstance() { + return defaultInstance; + } + + public ProcedureWALTrailer getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ProcedureWALTrailer( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + version_ = input.readUInt32(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + trackerPos_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALTrailer_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALTrailer_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ProcedureWALTrailer parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ProcedureWALTrailer(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required uint32 version = 1; + public static final int VERSION_FIELD_NUMBER = 1; + private int version_; + /** + * required uint32 version = 1; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint32 version = 1; + */ + public int 
getVersion() { + return version_; + } + + // required uint64 tracker_pos = 2; + public static final int TRACKER_POS_FIELD_NUMBER = 2; + private long trackerPos_; + /** + * required uint64 tracker_pos = 2; + */ + public boolean hasTrackerPos() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 tracker_pos = 2; + */ + public long getTrackerPos() { + return trackerPos_; + } + + private void initFields() { + version_ = 0; + trackerPos_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasVersion()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTrackerPos()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt32(1, version_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, trackerPos_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(1, version_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, trackerPos_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer) obj; + + boolean result = true; + result = result && (hasVersion() == other.hasVersion()); + if (hasVersion()) { + result = result && (getVersion() + == other.getVersion()); + } + result = result && (hasTrackerPos() == other.hasTrackerPos()); + if (hasTrackerPos()) { + result = result && (getTrackerPos() + == other.getTrackerPos()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasVersion()) { + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion(); + } + if (hasTrackerPos()) { + hash = (37 * hash) + TRACKER_POS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTrackerPos()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ProcedureWALTrailer} + * + *
      +     * <pre>
      +     **
      +     * Procedure WAL trailer
      +     * </pre>
      + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailerOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALTrailer_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALTrailer_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + version_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + trackerPos_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALTrailer_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer build() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.version_ = version_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.trackerPos_ = trackerPos_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer.getDefaultInstance()) return this; + if (other.hasVersion()) { + setVersion(other.getVersion()); + } + if (other.hasTrackerPos()) { + setTrackerPos(other.getTrackerPos()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasVersion()) { + + return false; + } + if (!hasTrackerPos()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required uint32 version = 1; + private int version_ ; + /** + * required uint32 version = 1; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint32 version = 1; + */ + public int getVersion() { + return version_; + } + /** + * required uint32 version = 1; + */ + public Builder setVersion(int value) { + bitField0_ |= 0x00000001; + version_ = value; + onChanged(); + return this; + } + /** + * required uint32 version = 1; + */ + public Builder clearVersion() { + bitField0_ = (bitField0_ & ~0x00000001); + version_ = 0; + onChanged(); + return this; + } + + // required uint64 tracker_pos = 2; + private long trackerPos_ ; + /** + * required uint64 tracker_pos = 2; + */ + public boolean hasTrackerPos() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 tracker_pos = 2; + */ + public long getTrackerPos() { + return trackerPos_; + } + /** + * required uint64 tracker_pos = 2; + */ + public Builder setTrackerPos(long value) { + bitField0_ |= 0x00000002; + trackerPos_ = value; + onChanged(); + return this; + } + /** + * required uint64 tracker_pos = 2; + */ + public Builder clearTrackerPos() { + bitField0_ = (bitField0_ & ~0x00000002); + trackerPos_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ProcedureWALTrailer) + } + + static { + defaultInstance = new ProcedureWALTrailer(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ProcedureWALTrailer) + } + + public interface ProcedureStoreTrackerOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .ProcedureStoreTracker.TrackerNode node = 1; + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + java.util.List + getNodeList(); + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode getNode(int index); + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + int getNodeCount(); + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + java.util.List + getNodeOrBuilderList(); + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNodeOrBuilder getNodeOrBuilder( + int 
index); + } + /** + * Protobuf type {@code ProcedureStoreTracker} + */ + public static final class ProcedureStoreTracker extends + com.google.protobuf.GeneratedMessage + implements ProcedureStoreTrackerOrBuilder { + // Use ProcedureStoreTracker.newBuilder() to construct. + private ProcedureStoreTracker(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ProcedureStoreTracker(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ProcedureStoreTracker defaultInstance; + public static ProcedureStoreTracker getDefaultInstance() { + return defaultInstance; + } + + public ProcedureStoreTracker getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ProcedureStoreTracker( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + node_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + node_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + node_ = java.util.Collections.unmodifiableList(node_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ProcedureStoreTracker parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ProcedureStoreTracker(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + 
return PARSER; + } + + public interface TrackerNodeOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 start_id = 1; + /** + * required uint64 start_id = 1; + */ + boolean hasStartId(); + /** + * required uint64 start_id = 1; + */ + long getStartId(); + + // repeated uint64 updated = 2; + /** + * repeated uint64 updated = 2; + */ + java.util.List getUpdatedList(); + /** + * repeated uint64 updated = 2; + */ + int getUpdatedCount(); + /** + * repeated uint64 updated = 2; + */ + long getUpdated(int index); + + // repeated uint64 deleted = 3; + /** + * repeated uint64 deleted = 3; + */ + java.util.List getDeletedList(); + /** + * repeated uint64 deleted = 3; + */ + int getDeletedCount(); + /** + * repeated uint64 deleted = 3; + */ + long getDeleted(int index); + } + /** + * Protobuf type {@code ProcedureStoreTracker.TrackerNode} + */ + public static final class TrackerNode extends + com.google.protobuf.GeneratedMessage + implements TrackerNodeOrBuilder { + // Use TrackerNode.newBuilder() to construct. + private TrackerNode(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TrackerNode(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TrackerNode defaultInstance; + public static TrackerNode getDefaultInstance() { + return defaultInstance; + } + + public TrackerNode getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TrackerNode( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + startId_ = input.readUInt64(); + break; + } + case 16: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + updated_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + updated_.add(input.readUInt64()); + break; + } + case 18: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002) && input.getBytesUntilLimit() > 0) { + updated_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + while (input.getBytesUntilLimit() > 0) { + updated_.add(input.readUInt64()); + } + input.popLimit(limit); + break; + } + case 24: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + deleted_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + deleted_.add(input.readUInt64()); + break; + } + case 26: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004) && input.getBytesUntilLimit() > 0) { + deleted_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + while (input.getBytesUntilLimit() > 0) { + 
deleted_.add(input.readUInt64()); + } + input.popLimit(limit); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + updated_ = java.util.Collections.unmodifiableList(updated_); + } + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + deleted_ = java.util.Collections.unmodifiableList(deleted_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_TrackerNode_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_TrackerNode_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TrackerNode parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TrackerNode(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required uint64 start_id = 1; + public static final int START_ID_FIELD_NUMBER = 1; + private long startId_; + /** + * required uint64 start_id = 1; + */ + public boolean hasStartId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 start_id = 1; + */ + public long getStartId() { + return startId_; + } + + // repeated uint64 updated = 2; + public static final int UPDATED_FIELD_NUMBER = 2; + private java.util.List updated_; + /** + * repeated uint64 updated = 2; + */ + public java.util.List + getUpdatedList() { + return updated_; + } + /** + * repeated uint64 updated = 2; + */ + public int getUpdatedCount() { + return updated_.size(); + } + /** + * repeated uint64 updated = 2; + */ + public long getUpdated(int index) { + return updated_.get(index); + } + + // repeated uint64 deleted = 3; + public static final int DELETED_FIELD_NUMBER = 3; + private java.util.List deleted_; + /** + * repeated uint64 deleted = 3; + */ + public java.util.List + getDeletedList() { + return deleted_; + } + /** + * repeated uint64 deleted = 3; + */ + public int getDeletedCount() { + return deleted_.size(); + } + /** + * repeated uint64 deleted = 3; + */ + public long getDeleted(int index) { + return deleted_.get(index); + } + + private void initFields() { + startId_ = 0L; + updated_ = java.util.Collections.emptyList(); + deleted_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasStartId()) { + memoizedIsInitialized = 0; + return 
false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, startId_); + } + for (int i = 0; i < updated_.size(); i++) { + output.writeUInt64(2, updated_.get(i)); + } + for (int i = 0; i < deleted_.size(); i++) { + output.writeUInt64(3, deleted_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, startId_); + } + { + int dataSize = 0; + for (int i = 0; i < updated_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeUInt64SizeNoTag(updated_.get(i)); + } + size += dataSize; + size += 1 * getUpdatedList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < deleted_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeUInt64SizeNoTag(deleted_.get(i)); + } + size += dataSize; + size += 1 * getDeletedList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode) obj; + + boolean result = true; + result = result && (hasStartId() == other.hasStartId()); + if (hasStartId()) { + result = result && (getStartId() + == other.getStartId()); + } + result = result && getUpdatedList() + .equals(other.getUpdatedList()); + result = result && getDeletedList() + .equals(other.getDeletedList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasStartId()) { + hash = (37 * hash) + START_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartId()); + } + if (getUpdatedCount() > 0) { + hash = (37 * hash) + UPDATED_FIELD_NUMBER; + hash = (53 * hash) + getUpdatedList().hashCode(); + } + if (getDeletedCount() > 0) { + hash = (37 * hash) + DELETED_FIELD_NUMBER; + hash = (53 * hash) + getDeletedList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ProcedureStoreTracker.TrackerNode} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNodeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_TrackerNode_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + 
return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_TrackerNode_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + startId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + updated_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + deleted_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_TrackerNode_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode build() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.startId_ = startId_; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + updated_ = java.util.Collections.unmodifiableList(updated_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.updated_ = updated_; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + deleted_ = java.util.Collections.unmodifiableList(deleted_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.deleted_ = deleted_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.getDefaultInstance()) return this; + if (other.hasStartId()) { + setStartId(other.getStartId()); + } + if (!other.updated_.isEmpty()) { + if (updated_.isEmpty()) { + updated_ = other.updated_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureUpdatedIsMutable(); + updated_.addAll(other.updated_); + } + onChanged(); + } + if (!other.deleted_.isEmpty()) { + if (deleted_.isEmpty()) { + deleted_ = other.deleted_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureDeletedIsMutable(); + deleted_.addAll(other.deleted_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasStartId()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required uint64 start_id = 1; + private long startId_ ; + /** + * required uint64 start_id = 1; + */ + public boolean hasStartId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 start_id = 1; + */ + public long getStartId() { + return startId_; + } + /** + * required uint64 start_id = 1; + */ + public Builder setStartId(long value) { + bitField0_ |= 0x00000001; + startId_ = value; + onChanged(); + return this; + } + /** + * required uint64 start_id = 1; + */ + public Builder clearStartId() { + bitField0_ = (bitField0_ & ~0x00000001); + startId_ = 0L; + onChanged(); + return this; + } + + // repeated uint64 updated = 2; + private java.util.List updated_ = java.util.Collections.emptyList(); + private void ensureUpdatedIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + updated_ = new java.util.ArrayList(updated_); + bitField0_ |= 0x00000002; + } + } + /** + * repeated uint64 updated = 2; + */ + public java.util.List + getUpdatedList() { + return java.util.Collections.unmodifiableList(updated_); + } + /** + * repeated uint64 updated = 2; + */ + public int getUpdatedCount() { + return updated_.size(); + } + /** + * repeated uint64 updated = 2; + */ + public long getUpdated(int index) { + return updated_.get(index); + } + /** + * repeated uint64 updated = 2; + */ + public Builder setUpdated( + int index, long value) { + ensureUpdatedIsMutable(); + updated_.set(index, value); + onChanged(); + return this; + } + /** + * repeated uint64 updated = 2; + */ + public Builder addUpdated(long value) { + ensureUpdatedIsMutable(); + updated_.add(value); + onChanged(); + return this; + } + /** + * repeated uint64 updated = 2; + */ + public Builder addAllUpdated( + java.lang.Iterable values) { + ensureUpdatedIsMutable(); + super.addAll(values, updated_); + onChanged(); + return this; + } + /** + * repeated 
uint64 updated = 2; + */ + public Builder clearUpdated() { + updated_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + // repeated uint64 deleted = 3; + private java.util.List deleted_ = java.util.Collections.emptyList(); + private void ensureDeletedIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + deleted_ = new java.util.ArrayList(deleted_); + bitField0_ |= 0x00000004; + } + } + /** + * repeated uint64 deleted = 3; + */ + public java.util.List + getDeletedList() { + return java.util.Collections.unmodifiableList(deleted_); + } + /** + * repeated uint64 deleted = 3; + */ + public int getDeletedCount() { + return deleted_.size(); + } + /** + * repeated uint64 deleted = 3; + */ + public long getDeleted(int index) { + return deleted_.get(index); + } + /** + * repeated uint64 deleted = 3; + */ + public Builder setDeleted( + int index, long value) { + ensureDeletedIsMutable(); + deleted_.set(index, value); + onChanged(); + return this; + } + /** + * repeated uint64 deleted = 3; + */ + public Builder addDeleted(long value) { + ensureDeletedIsMutable(); + deleted_.add(value); + onChanged(); + return this; + } + /** + * repeated uint64 deleted = 3; + */ + public Builder addAllDeleted( + java.lang.Iterable values) { + ensureDeletedIsMutable(); + super.addAll(values, deleted_); + onChanged(); + return this; + } + /** + * repeated uint64 deleted = 3; + */ + public Builder clearDeleted() { + deleted_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ProcedureStoreTracker.TrackerNode) + } + + static { + defaultInstance = new TrackerNode(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ProcedureStoreTracker.TrackerNode) + } + + // repeated .ProcedureStoreTracker.TrackerNode node = 1; + public static final int NODE_FIELD_NUMBER = 1; + private java.util.List node_; + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public java.util.List getNodeList() { + return node_; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public java.util.List + getNodeOrBuilderList() { + return node_; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public int getNodeCount() { + return node_.size(); + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode getNode(int index) { + return node_.get(index); + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNodeOrBuilder getNodeOrBuilder( + int index) { + return node_.get(index); + } + + private void initFields() { + node_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getNodeCount(); i++) { + if (!getNode(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < node_.size(); i++) { + output.writeMessage(1, node_.get(i)); + } + 
getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < node_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, node_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker) obj; + + boolean result = true; + result = result && getNodeList() + .equals(other.getNodeList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getNodeCount() > 0) { + hash = (37 * hash) + NODE_FIELD_NUMBER; + hash = (53 * hash) + getNodeList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ProcedureStoreTracker} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTrackerOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getNodeFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (nodeBuilder_ == null) { + node_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + nodeBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker getDefaultInstanceForType() { + return 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker build() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker(this); + int from_bitField0_ = bitField0_; + if (nodeBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + node_ = java.util.Collections.unmodifiableList(node_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.node_ = node_; + } else { + result.node_ = nodeBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.getDefaultInstance()) return this; + if (nodeBuilder_ == null) { + if (!other.node_.isEmpty()) { + if (node_.isEmpty()) { + node_ = other.node_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureNodeIsMutable(); + node_.addAll(other.node_); + } + onChanged(); + } + } else { + if (!other.node_.isEmpty()) { + if (nodeBuilder_.isEmpty()) { + nodeBuilder_.dispose(); + nodeBuilder_ = null; + node_ = other.node_; + bitField0_ = (bitField0_ & ~0x00000001); + nodeBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getNodeFieldBuilder() : null; + } else { + nodeBuilder_.addAllMessages(other.node_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getNodeCount(); i++) { + if (!getNode(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .ProcedureStoreTracker.TrackerNode node = 1; + private java.util.List node_ = + java.util.Collections.emptyList(); + private void ensureNodeIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + node_ = new java.util.ArrayList(node_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNodeOrBuilder> nodeBuilder_; + + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public java.util.List getNodeList() { + if (nodeBuilder_ == null) { + return java.util.Collections.unmodifiableList(node_); + } else { + return nodeBuilder_.getMessageList(); + } + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public int getNodeCount() { + if (nodeBuilder_ == null) { + return node_.size(); + } else { + return nodeBuilder_.getCount(); + } + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode getNode(int index) { + if (nodeBuilder_ == null) { + return node_.get(index); + } else { + return nodeBuilder_.getMessage(index); + } + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder setNode( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode value) { + if (nodeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNodeIsMutable(); + node_.set(index, value); + onChanged(); + } else { + nodeBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder setNode( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder builderForValue) { + if (nodeBuilder_ == null) { + ensureNodeIsMutable(); + node_.set(index, builderForValue.build()); + onChanged(); + } else { + nodeBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder addNode(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode value) { + if 
(nodeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNodeIsMutable(); + node_.add(value); + onChanged(); + } else { + nodeBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder addNode( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode value) { + if (nodeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNodeIsMutable(); + node_.add(index, value); + onChanged(); + } else { + nodeBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder addNode( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder builderForValue) { + if (nodeBuilder_ == null) { + ensureNodeIsMutable(); + node_.add(builderForValue.build()); + onChanged(); + } else { + nodeBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder addNode( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder builderForValue) { + if (nodeBuilder_ == null) { + ensureNodeIsMutable(); + node_.add(index, builderForValue.build()); + onChanged(); + } else { + nodeBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder addAllNode( + java.lang.Iterable values) { + if (nodeBuilder_ == null) { + ensureNodeIsMutable(); + super.addAll(values, node_); + onChanged(); + } else { + nodeBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder clearNode() { + if (nodeBuilder_ == null) { + node_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + nodeBuilder_.clear(); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder removeNode(int index) { + if (nodeBuilder_ == null) { + ensureNodeIsMutable(); + node_.remove(index); + onChanged(); + } else { + nodeBuilder_.remove(index); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder getNodeBuilder( + int index) { + return getNodeFieldBuilder().getBuilder(index); + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNodeOrBuilder getNodeOrBuilder( + int index) { + if (nodeBuilder_ == null) { + return node_.get(index); } else { + return nodeBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public java.util.List + getNodeOrBuilderList() { + if (nodeBuilder_ != null) { + return nodeBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(node_); + } + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder addNodeBuilder() { + return getNodeFieldBuilder().addBuilder( + 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.getDefaultInstance()); + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder addNodeBuilder( + int index) { + return getNodeFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.getDefaultInstance()); + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public java.util.List + getNodeBuilderList() { + return getNodeFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNodeOrBuilder> + getNodeFieldBuilder() { + if (nodeBuilder_ == null) { + nodeBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNodeOrBuilder>( + node_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + node_ = null; + } + return nodeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:ProcedureStoreTracker) + } + + static { + defaultInstance = new ProcedureStoreTracker(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ProcedureStoreTracker) + } + + public interface ProcedureWALEntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .ProcedureWALEntry.Type type = 1; + /** + * required .ProcedureWALEntry.Type type = 1; + */ + boolean hasType(); + /** + * required .ProcedureWALEntry.Type type = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type getType(); + + // repeated .Procedure procedure = 2; + /** + * repeated .Procedure procedure = 2; + */ + java.util.List + getProcedureList(); + /** + * repeated .Procedure procedure = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index); + /** + * repeated .Procedure procedure = 2; + */ + int getProcedureCount(); + /** + * repeated .Procedure procedure = 2; + */ + java.util.List + getProcedureOrBuilderList(); + /** + * repeated .Procedure procedure = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder( + int index); + + // optional uint64 proc_id = 3; + /** + * optional uint64 proc_id = 3; + */ + boolean hasProcId(); + /** + * optional uint64 proc_id = 3; + */ + long getProcId(); + } + /** + * Protobuf type {@code ProcedureWALEntry} + */ + public static final class ProcedureWALEntry extends + com.google.protobuf.GeneratedMessage + implements ProcedureWALEntryOrBuilder { + // Use ProcedureWALEntry.newBuilder() to construct. 
+ private ProcedureWALEntry(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ProcedureWALEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ProcedureWALEntry defaultInstance; + public static ProcedureWALEntry getDefaultInstance() { + return defaultInstance; + } + + public ProcedureWALEntry getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ProcedureWALEntry( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type value = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + type_ = value; + } + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + procedure_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + procedure_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.PARSER, extensionRegistry)); + break; + } + case 24: { + bitField0_ |= 0x00000002; + procId_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + procedure_ = java.util.Collections.unmodifiableList(procedure_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALEntry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALEntry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ProcedureWALEntry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return new ProcedureWALEntry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + /** + * Protobuf enum {@code ProcedureWALEntry.Type} + */ + public enum Type + implements com.google.protobuf.ProtocolMessageEnum { + /** + * EOF = 1; + */ + EOF(0, 1), + /** + * INIT = 2; + */ + INIT(1, 2), + /** + * INSERT = 3; + */ + INSERT(2, 3), + /** + * UPDATE = 4; + */ + UPDATE(3, 4), + /** + * DELETE = 5; + */ + DELETE(4, 5), + /** + * COMPACT = 6; + */ + COMPACT(5, 6), + ; + + /** + * EOF = 1; + */ + public static final int EOF_VALUE = 1; + /** + * INIT = 2; + */ + public static final int INIT_VALUE = 2; + /** + * INSERT = 3; + */ + public static final int INSERT_VALUE = 3; + /** + * UPDATE = 4; + */ + public static final int UPDATE_VALUE = 4; + /** + * DELETE = 5; + */ + public static final int DELETE_VALUE = 5; + /** + * COMPACT = 6; + */ + public static final int COMPACT_VALUE = 6; + + + public final int getNumber() { return value; } + + public static Type valueOf(int value) { + switch (value) { + case 1: return EOF; + case 2: return INIT; + case 3: return INSERT; + case 4: return UPDATE; + case 5: return DELETE; + case 6: return COMPACT; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.getDescriptor().getEnumTypes().get(0); + } + + private static final Type[] VALUES = values(); + + public static Type valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private Type(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:ProcedureWALEntry.Type) + } + + private int bitField0_; + // required .ProcedureWALEntry.Type type = 1; + public static final int TYPE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type type_; + /** + * required .ProcedureWALEntry.Type type = 1; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .ProcedureWALEntry.Type type = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type getType() { + return type_; + } + + // repeated .Procedure procedure = 2; + public static final int PROCEDURE_FIELD_NUMBER = 2; + private java.util.List procedure_; + /** + * repeated .Procedure procedure = 2; + */ + public java.util.List getProcedureList() { + return procedure_; + } + /** + * repeated .Procedure procedure = 2; + */ + public java.util.List + getProcedureOrBuilderList() { + return 
procedure_; + } + /** + * repeated .Procedure procedure = 2; + */ + public int getProcedureCount() { + return procedure_.size(); + } + /** + * repeated .Procedure procedure = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index) { + return procedure_.get(index); + } + /** + * repeated .Procedure procedure = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder( + int index) { + return procedure_.get(index); + } + + // optional uint64 proc_id = 3; + public static final int PROC_ID_FIELD_NUMBER = 3; + private long procId_; + /** + * optional uint64 proc_id = 3; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint64 proc_id = 3; + */ + public long getProcId() { + return procId_; + } + + private void initFields() { + type_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type.EOF; + procedure_ = java.util.Collections.emptyList(); + procId_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getProcedureCount(); i++) { + if (!getProcedure(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, type_.getNumber()); + } + for (int i = 0; i < procedure_.size(); i++) { + output.writeMessage(2, procedure_.get(i)); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(3, procId_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, type_.getNumber()); + } + for (int i = 0; i < procedure_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, procedure_.get(i)); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, procId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry) obj; + + boolean result = true; + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && + (getType() == other.getType()); + } + result = result && getProcedureList() + .equals(other.getProcedureList()); + result = result && (hasProcId() == 
other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); + } + if (getProcedureCount() > 0) { + hash = (37 * hash) + PROCEDURE_FIELD_NUMBER; + hash = (53 * hash) + getProcedureList().hashCode(); + } + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return 
Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ProcedureWALEntry} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALEntry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALEntry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getProcedureFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + type_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type.EOF; + bitField0_ = (bitField0_ & ~0x00000001); + if (procedureBuilder_ == null) { + procedure_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + procedureBuilder_.clear(); + } + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALEntry_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry build() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if 
(((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.type_ = type_; + if (procedureBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + procedure_ = java.util.Collections.unmodifiableList(procedure_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.procedure_ = procedure_; + } else { + result.procedure_ = procedureBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + result.procId_ = procId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.getDefaultInstance()) return this; + if (other.hasType()) { + setType(other.getType()); + } + if (procedureBuilder_ == null) { + if (!other.procedure_.isEmpty()) { + if (procedure_.isEmpty()) { + procedure_ = other.procedure_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureProcedureIsMutable(); + procedure_.addAll(other.procedure_); + } + onChanged(); + } + } else { + if (!other.procedure_.isEmpty()) { + if (procedureBuilder_.isEmpty()) { + procedureBuilder_.dispose(); + procedureBuilder_ = null; + procedure_ = other.procedure_; + bitField0_ = (bitField0_ & ~0x00000002); + procedureBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getProcedureFieldBuilder() : null; + } else { + procedureBuilder_.addAllMessages(other.procedure_); + } + } + } + if (other.hasProcId()) { + setProcId(other.getProcId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasType()) { + + return false; + } + for (int i = 0; i < getProcedureCount(); i++) { + if (!getProcedure(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .ProcedureWALEntry.Type type = 1; + private org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type type_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type.EOF; + /** + * required .ProcedureWALEntry.Type type = 1; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .ProcedureWALEntry.Type type = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type getType() { + return type_; + } + /** + * required .ProcedureWALEntry.Type type = 1; + */ + public Builder setType(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + type_ = value; + onChanged(); + return this; + } + /** + * required .ProcedureWALEntry.Type type = 1; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000001); + type_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type.EOF; + onChanged(); + return this; + } + + // repeated .Procedure procedure = 2; + private java.util.List procedure_ = + java.util.Collections.emptyList(); + private void ensureProcedureIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + procedure_ = new java.util.ArrayList(procedure_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> procedureBuilder_; + + /** + * repeated .Procedure procedure = 2; + */ + public java.util.List getProcedureList() { + if (procedureBuilder_ == null) { + return java.util.Collections.unmodifiableList(procedure_); + } else { + return procedureBuilder_.getMessageList(); + } + } + /** + * repeated .Procedure procedure = 2; + */ + public int getProcedureCount() { + if (procedureBuilder_ == null) { + return procedure_.size(); + } else { + return procedureBuilder_.getCount(); + } + } + /** + * repeated .Procedure procedure = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index) { + if (procedureBuilder_ == null) { + return 
procedure_.get(index); + } else { + return procedureBuilder_.getMessage(index); + } + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder setProcedure( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { + if (procedureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureProcedureIsMutable(); + procedure_.set(index, value); + onChanged(); + } else { + procedureBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder setProcedure( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + procedure_.set(index, builderForValue.build()); + onChanged(); + } else { + procedureBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder addProcedure(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { + if (procedureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureProcedureIsMutable(); + procedure_.add(value); + onChanged(); + } else { + procedureBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder addProcedure( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { + if (procedureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureProcedureIsMutable(); + procedure_.add(index, value); + onChanged(); + } else { + procedureBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder addProcedure( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + procedure_.add(builderForValue.build()); + onChanged(); + } else { + procedureBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder addProcedure( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + procedure_.add(index, builderForValue.build()); + onChanged(); + } else { + procedureBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder addAllProcedure( + java.lang.Iterable values) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + super.addAll(values, procedure_); + onChanged(); + } else { + procedureBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder clearProcedure() { + if (procedureBuilder_ == null) { + procedure_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + procedureBuilder_.clear(); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder removeProcedure(int index) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + procedure_.remove(index); + onChanged(); + } else { + procedureBuilder_.remove(index); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder getProcedureBuilder( + int index) { + return getProcedureFieldBuilder().getBuilder(index); + } + /** + * repeated .Procedure procedure = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder( + int index) { + if (procedureBuilder_ == null) { + return procedure_.get(index); } else { + return procedureBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .Procedure procedure = 2; + */ + public java.util.List + getProcedureOrBuilderList() { + if (procedureBuilder_ != null) { + return procedureBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(procedure_); + } + } + /** + * repeated .Procedure procedure = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder addProcedureBuilder() { + return getProcedureFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()); + } + /** + * repeated .Procedure procedure = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder addProcedureBuilder( + int index) { + return getProcedureFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()); + } + /** + * repeated .Procedure procedure = 2; + */ + public java.util.List + getProcedureBuilderList() { + return getProcedureFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> + getProcedureFieldBuilder() { + if (procedureBuilder_ == null) { + procedureBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder>( + procedure_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + procedure_ = null; + } + return procedureBuilder_; + } + + // optional uint64 proc_id = 3; + private long procId_ ; + /** + * optional uint64 proc_id = 3; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 proc_id = 3; + */ + public long getProcId() { + return procId_; + } + /** + * optional uint64 proc_id = 3; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000004; + procId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 proc_id = 3; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000004); + procId_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ProcedureWALEntry) + } + + static { + defaultInstance = new ProcedureWALEntry(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ProcedureWALEntry) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_Procedure_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_Procedure_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_SequentialProcedureData_descriptor; + 
private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_SequentialProcedureData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_StateMachineProcedureData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_StateMachineProcedureData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ProcedureWALHeader_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ProcedureWALHeader_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ProcedureWALTrailer_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ProcedureWALTrailer_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ProcedureStoreTracker_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ProcedureStoreTracker_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ProcedureStoreTracker_TrackerNode_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ProcedureStoreTracker_TrackerNode_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ProcedureWALEntry_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ProcedureWALEntry_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\017Procedure.proto\032\023ErrorHandling.proto\"\217" + + "\002\n\tProcedure\022\022\n\nclass_name\030\001 \002(\t\022\021\n\tpare" + + "nt_id\030\002 \001(\004\022\017\n\007proc_id\030\003 \002(\004\022\022\n\nstart_ti" + + "me\030\004 \002(\004\022\r\n\005owner\030\005 \001(\t\022\036\n\005state\030\006 \002(\0162\017" + + ".ProcedureState\022\020\n\010stack_id\030\007 \003(\r\022\023\n\013las" + + "t_update\030\010 \002(\004\022\017\n\007timeout\030\t \001(\r\022+\n\texcep" + + "tion\030\n \001(\0132\030.ForeignExceptionMessage\022\016\n\006" + + "result\030\013 \001(\014\022\022\n\nstate_data\030\014 \001(\014\"+\n\027Sequ" + + "entialProcedureData\022\020\n\010executed\030\001 \002(\010\"*\n" + + "\031StateMachineProcedureData\022\r\n\005state\030\001 \003(", + "\r\"X\n\022ProcedureWALHeader\022\017\n\007version\030\001 \002(\r" + + "\022\014\n\004type\030\002 \002(\r\022\016\n\006log_id\030\003 \002(\004\022\023\n\013min_pr" + + "oc_id\030\004 \002(\004\";\n\023ProcedureWALTrailer\022\017\n\007ve" + + "rsion\030\001 \002(\r\022\023\n\013tracker_pos\030\002 \002(\004\"\214\001\n\025Pro" + + "cedureStoreTracker\0220\n\004node\030\001 \003(\0132\".Proce" + + "dureStoreTracker.TrackerNode\032A\n\013TrackerN" + + "ode\022\020\n\010start_id\030\001 \002(\004\022\017\n\007updated\030\002 \003(\004\022\017" + + "\n\007deleted\030\003 \003(\004\"\266\001\n\021ProcedureWALEntry\022%\n" + + "\004type\030\001 \002(\0162\027.ProcedureWALEntry.Type\022\035\n\t" + + "procedure\030\002 \003(\0132\n.Procedure\022\017\n\007proc_id\030\003", + " \001(\004\"J\n\004Type\022\007\n\003EOF\020\001\022\010\n\004INIT\020\002\022\n\n\006INSER" + + 
"T\020\003\022\n\n\006UPDATE\020\004\022\n\n\006DELETE\020\005\022\013\n\007COMPACT\020\006" + + "*p\n\016ProcedureState\022\020\n\014INITIALIZING\020\001\022\014\n\010" + + "RUNNABLE\020\002\022\013\n\007WAITING\020\003\022\023\n\017WAITING_TIMEO" + + "UT\020\004\022\016\n\nROLLEDBACK\020\005\022\014\n\010FINISHED\020\006BE\n*or" + + "g.apache.hadoop.hbase.protobuf.generated" + + "B\017ProcedureProtosH\001\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_Procedure_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_Procedure_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_Procedure_descriptor, + new java.lang.String[] { "ClassName", "ParentId", "ProcId", "StartTime", "Owner", "State", "StackId", "LastUpdate", "Timeout", "Exception", "Result", "StateData", }); + internal_static_SequentialProcedureData_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_SequentialProcedureData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_SequentialProcedureData_descriptor, + new java.lang.String[] { "Executed", }); + internal_static_StateMachineProcedureData_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_StateMachineProcedureData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_StateMachineProcedureData_descriptor, + new java.lang.String[] { "State", }); + internal_static_ProcedureWALHeader_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_ProcedureWALHeader_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ProcedureWALHeader_descriptor, + new java.lang.String[] { "Version", "Type", "LogId", "MinProcId", }); + internal_static_ProcedureWALTrailer_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_ProcedureWALTrailer_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ProcedureWALTrailer_descriptor, + new java.lang.String[] { "Version", "TrackerPos", }); + internal_static_ProcedureStoreTracker_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_ProcedureStoreTracker_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ProcedureStoreTracker_descriptor, + new java.lang.String[] { "Node", }); + internal_static_ProcedureStoreTracker_TrackerNode_descriptor = + internal_static_ProcedureStoreTracker_descriptor.getNestedTypes().get(0); + internal_static_ProcedureStoreTracker_TrackerNode_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ProcedureStoreTracker_TrackerNode_descriptor, + new java.lang.String[] { "StartId", "Updated", "Deleted", }); + internal_static_ProcedureWALEntry_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_ProcedureWALEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ProcedureWALEntry_descriptor, + new java.lang.String[] { "Type", "Procedure", "ProcId", }); + return null; + } + }; + 
com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java index 5c1b959af1a..7758e98dba1 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java @@ -698,6 +698,1396 @@ public final class RPCProtos { // @@protoc_insertion_point(class_scope:UserInformation) } + public interface VersionInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string version = 1; + /** + * required string version = 1; + */ + boolean hasVersion(); + /** + * required string version = 1; + */ + java.lang.String getVersion(); + /** + * required string version = 1; + */ + com.google.protobuf.ByteString + getVersionBytes(); + + // required string url = 2; + /** + * required string url = 2; + */ + boolean hasUrl(); + /** + * required string url = 2; + */ + java.lang.String getUrl(); + /** + * required string url = 2; + */ + com.google.protobuf.ByteString + getUrlBytes(); + + // required string revision = 3; + /** + * required string revision = 3; + */ + boolean hasRevision(); + /** + * required string revision = 3; + */ + java.lang.String getRevision(); + /** + * required string revision = 3; + */ + com.google.protobuf.ByteString + getRevisionBytes(); + + // required string user = 4; + /** + * required string user = 4; + */ + boolean hasUser(); + /** + * required string user = 4; + */ + java.lang.String getUser(); + /** + * required string user = 4; + */ + com.google.protobuf.ByteString + getUserBytes(); + + // required string date = 5; + /** + * required string date = 5; + */ + boolean hasDate(); + /** + * required string date = 5; + */ + java.lang.String getDate(); + /** + * required string date = 5; + */ + com.google.protobuf.ByteString + getDateBytes(); + + // required string src_checksum = 6; + /** + * required string src_checksum = 6; + */ + boolean hasSrcChecksum(); + /** + * required string src_checksum = 6; + */ + java.lang.String getSrcChecksum(); + /** + * required string src_checksum = 6; + */ + com.google.protobuf.ByteString + getSrcChecksumBytes(); + } + /** + * Protobuf type {@code VersionInfo} + * + *
      +   * Rpc client version info proto. Included in ConnectionHeader on connection setup
      +   * </pre>
      + */ + public static final class VersionInfo extends + com.google.protobuf.GeneratedMessage + implements VersionInfoOrBuilder { + // Use VersionInfo.newBuilder() to construct. + private VersionInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private VersionInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final VersionInfo defaultInstance; + public static VersionInfo getDefaultInstance() { + return defaultInstance; + } + + public VersionInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private VersionInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + version_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + url_ = input.readBytes(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + revision_ = input.readBytes(); + break; + } + case 34: { + bitField0_ |= 0x00000008; + user_ = input.readBytes(); + break; + } + case 42: { + bitField0_ |= 0x00000010; + date_ = input.readBytes(); + break; + } + case 50: { + bitField0_ |= 0x00000020; + srcChecksum_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RPCProtos.internal_static_VersionInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RPCProtos.internal_static_VersionInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.class, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public VersionInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new VersionInfo(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string version = 1; + public static final int VERSION_FIELD_NUMBER = 1; + private 
java.lang.Object version_; + /** + * required string version = 1; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string version = 1; + */ + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + version_ = s; + } + return s; + } + } + /** + * required string version = 1; + */ + public com.google.protobuf.ByteString + getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string url = 2; + public static final int URL_FIELD_NUMBER = 2; + private java.lang.Object url_; + /** + * required string url = 2; + */ + public boolean hasUrl() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string url = 2; + */ + public java.lang.String getUrl() { + java.lang.Object ref = url_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + url_ = s; + } + return s; + } + } + /** + * required string url = 2; + */ + public com.google.protobuf.ByteString + getUrlBytes() { + java.lang.Object ref = url_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + url_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string revision = 3; + public static final int REVISION_FIELD_NUMBER = 3; + private java.lang.Object revision_; + /** + * required string revision = 3; + */ + public boolean hasRevision() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string revision = 3; + */ + public java.lang.String getRevision() { + java.lang.Object ref = revision_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + revision_ = s; + } + return s; + } + } + /** + * required string revision = 3; + */ + public com.google.protobuf.ByteString + getRevisionBytes() { + java.lang.Object ref = revision_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + revision_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string user = 4; + public static final int USER_FIELD_NUMBER = 4; + private java.lang.Object user_; + /** + * required string user = 4; + */ + public boolean hasUser() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required string user = 4; + */ + public java.lang.String getUser() { + java.lang.Object ref = user_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + 
user_ = s; + } + return s; + } + } + /** + * required string user = 4; + */ + public com.google.protobuf.ByteString + getUserBytes() { + java.lang.Object ref = user_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + user_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string date = 5; + public static final int DATE_FIELD_NUMBER = 5; + private java.lang.Object date_; + /** + * required string date = 5; + */ + public boolean hasDate() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required string date = 5; + */ + public java.lang.String getDate() { + java.lang.Object ref = date_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + date_ = s; + } + return s; + } + } + /** + * required string date = 5; + */ + public com.google.protobuf.ByteString + getDateBytes() { + java.lang.Object ref = date_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + date_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string src_checksum = 6; + public static final int SRC_CHECKSUM_FIELD_NUMBER = 6; + private java.lang.Object srcChecksum_; + /** + * required string src_checksum = 6; + */ + public boolean hasSrcChecksum() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * required string src_checksum = 6; + */ + public java.lang.String getSrcChecksum() { + java.lang.Object ref = srcChecksum_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + srcChecksum_ = s; + } + return s; + } + } + /** + * required string src_checksum = 6; + */ + public com.google.protobuf.ByteString + getSrcChecksumBytes() { + java.lang.Object ref = srcChecksum_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + srcChecksum_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + version_ = ""; + url_ = ""; + revision_ = ""; + user_ = ""; + date_ = ""; + srcChecksum_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasVersion()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasUrl()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasRevision()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasUser()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasDate()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasSrcChecksum()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getVersionBytes()); + } + if (((bitField0_ & 0x00000002) == 
0x00000002)) { + output.writeBytes(2, getUrlBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getRevisionBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(4, getUserBytes()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBytes(5, getDateBytes()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBytes(6, getSrcChecksumBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getVersionBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getUrlBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getRevisionBytes()); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, getUserBytes()); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(5, getDateBytes()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(6, getSrcChecksumBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo other = (org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo) obj; + + boolean result = true; + result = result && (hasVersion() == other.hasVersion()); + if (hasVersion()) { + result = result && getVersion() + .equals(other.getVersion()); + } + result = result && (hasUrl() == other.hasUrl()); + if (hasUrl()) { + result = result && getUrl() + .equals(other.getUrl()); + } + result = result && (hasRevision() == other.hasRevision()); + if (hasRevision()) { + result = result && getRevision() + .equals(other.getRevision()); + } + result = result && (hasUser() == other.hasUser()); + if (hasUser()) { + result = result && getUser() + .equals(other.getUser()); + } + result = result && (hasDate() == other.hasDate()); + if (hasDate()) { + result = result && getDate() + .equals(other.getDate()); + } + result = result && (hasSrcChecksum() == other.hasSrcChecksum()); + if (hasSrcChecksum()) { + result = result && getSrcChecksum() + .equals(other.getSrcChecksum()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasVersion()) { + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion().hashCode(); + } + 
if (hasUrl()) { + hash = (37 * hash) + URL_FIELD_NUMBER; + hash = (53 * hash) + getUrl().hashCode(); + } + if (hasRevision()) { + hash = (37 * hash) + REVISION_FIELD_NUMBER; + hash = (53 * hash) + getRevision().hashCode(); + } + if (hasUser()) { + hash = (37 * hash) + USER_FIELD_NUMBER; + hash = (53 * hash) + getUser().hashCode(); + } + if (hasDate()) { + hash = (37 * hash) + DATE_FIELD_NUMBER; + hash = (53 * hash) + getDate().hashCode(); + } + if (hasSrcChecksum()) { + hash = (37 * hash) + SRC_CHECKSUM_FIELD_NUMBER; + hash = (53 * hash) + getSrcChecksum().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected 
Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code VersionInfo} + * + *
      +     * Rpc client version info proto. Included in ConnectionHeader on connection setup
      +     * </pre>
      + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RPCProtos.internal_static_VersionInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RPCProtos.internal_static_VersionInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.class, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + version_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + url_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + revision_ = ""; + bitField0_ = (bitField0_ & ~0x00000004); + user_ = ""; + bitField0_ = (bitField0_ & ~0x00000008); + date_ = ""; + bitField0_ = (bitField0_ & ~0x00000010); + srcChecksum_ = ""; + bitField0_ = (bitField0_ & ~0x00000020); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RPCProtos.internal_static_VersionInfo_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo build() { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo result = new org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.version_ = version_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.url_ = url_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.revision_ = revision_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.user_ = user_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.date_ = date_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.srcChecksum_ = srcChecksum_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return 
result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.getDefaultInstance()) return this; + if (other.hasVersion()) { + bitField0_ |= 0x00000001; + version_ = other.version_; + onChanged(); + } + if (other.hasUrl()) { + bitField0_ |= 0x00000002; + url_ = other.url_; + onChanged(); + } + if (other.hasRevision()) { + bitField0_ |= 0x00000004; + revision_ = other.revision_; + onChanged(); + } + if (other.hasUser()) { + bitField0_ |= 0x00000008; + user_ = other.user_; + onChanged(); + } + if (other.hasDate()) { + bitField0_ |= 0x00000010; + date_ = other.date_; + onChanged(); + } + if (other.hasSrcChecksum()) { + bitField0_ |= 0x00000020; + srcChecksum_ = other.srcChecksum_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasVersion()) { + + return false; + } + if (!hasUrl()) { + + return false; + } + if (!hasRevision()) { + + return false; + } + if (!hasUser()) { + + return false; + } + if (!hasDate()) { + + return false; + } + if (!hasSrcChecksum()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string version = 1; + private java.lang.Object version_ = ""; + /** + * required string version = 1; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string version = 1; + */ + public java.lang.String getVersion() { + java.lang.Object ref = version_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + version_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string version = 1; + */ + public com.google.protobuf.ByteString + getVersionBytes() { + java.lang.Object ref = version_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + version_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string version = 1; + */ + public Builder setVersion( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + version_ = value; + onChanged(); + return this; + } + /** + * required string version = 1; + */ + public Builder clearVersion() { + bitField0_ = (bitField0_ & ~0x00000001); + version_ = getDefaultInstance().getVersion(); + onChanged(); + return 
this; + } + /** + * required string version = 1; + */ + public Builder setVersionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + version_ = value; + onChanged(); + return this; + } + + // required string url = 2; + private java.lang.Object url_ = ""; + /** + * required string url = 2; + */ + public boolean hasUrl() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string url = 2; + */ + public java.lang.String getUrl() { + java.lang.Object ref = url_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + url_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string url = 2; + */ + public com.google.protobuf.ByteString + getUrlBytes() { + java.lang.Object ref = url_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + url_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string url = 2; + */ + public Builder setUrl( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + url_ = value; + onChanged(); + return this; + } + /** + * required string url = 2; + */ + public Builder clearUrl() { + bitField0_ = (bitField0_ & ~0x00000002); + url_ = getDefaultInstance().getUrl(); + onChanged(); + return this; + } + /** + * required string url = 2; + */ + public Builder setUrlBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + url_ = value; + onChanged(); + return this; + } + + // required string revision = 3; + private java.lang.Object revision_ = ""; + /** + * required string revision = 3; + */ + public boolean hasRevision() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required string revision = 3; + */ + public java.lang.String getRevision() { + java.lang.Object ref = revision_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + revision_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string revision = 3; + */ + public com.google.protobuf.ByteString + getRevisionBytes() { + java.lang.Object ref = revision_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + revision_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string revision = 3; + */ + public Builder setRevision( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + revision_ = value; + onChanged(); + return this; + } + /** + * required string revision = 3; + */ + public Builder clearRevision() { + bitField0_ = (bitField0_ & ~0x00000004); + revision_ = getDefaultInstance().getRevision(); + onChanged(); + return this; + } + /** + * required string revision = 3; + */ + public Builder setRevisionBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + revision_ = value; + onChanged(); + return this; + } + + // required string user = 4; + private java.lang.Object user_ = ""; + /** + * required 
string user = 4; + */ + public boolean hasUser() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required string user = 4; + */ + public java.lang.String getUser() { + java.lang.Object ref = user_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + user_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string user = 4; + */ + public com.google.protobuf.ByteString + getUserBytes() { + java.lang.Object ref = user_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + user_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string user = 4; + */ + public Builder setUser( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + user_ = value; + onChanged(); + return this; + } + /** + * required string user = 4; + */ + public Builder clearUser() { + bitField0_ = (bitField0_ & ~0x00000008); + user_ = getDefaultInstance().getUser(); + onChanged(); + return this; + } + /** + * required string user = 4; + */ + public Builder setUserBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + user_ = value; + onChanged(); + return this; + } + + // required string date = 5; + private java.lang.Object date_ = ""; + /** + * required string date = 5; + */ + public boolean hasDate() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * required string date = 5; + */ + public java.lang.String getDate() { + java.lang.Object ref = date_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + date_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string date = 5; + */ + public com.google.protobuf.ByteString + getDateBytes() { + java.lang.Object ref = date_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + date_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string date = 5; + */ + public Builder setDate( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + date_ = value; + onChanged(); + return this; + } + /** + * required string date = 5; + */ + public Builder clearDate() { + bitField0_ = (bitField0_ & ~0x00000010); + date_ = getDefaultInstance().getDate(); + onChanged(); + return this; + } + /** + * required string date = 5; + */ + public Builder setDateBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + date_ = value; + onChanged(); + return this; + } + + // required string src_checksum = 6; + private java.lang.Object srcChecksum_ = ""; + /** + * required string src_checksum = 6; + */ + public boolean hasSrcChecksum() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * required string src_checksum = 6; + */ + public java.lang.String getSrcChecksum() { + java.lang.Object ref = srcChecksum_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + srcChecksum_ = s; + return s; + } 
else { + return (java.lang.String) ref; + } + } + /** + * required string src_checksum = 6; + */ + public com.google.protobuf.ByteString + getSrcChecksumBytes() { + java.lang.Object ref = srcChecksum_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + srcChecksum_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string src_checksum = 6; + */ + public Builder setSrcChecksum( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + srcChecksum_ = value; + onChanged(); + return this; + } + /** + * required string src_checksum = 6; + */ + public Builder clearSrcChecksum() { + bitField0_ = (bitField0_ & ~0x00000020); + srcChecksum_ = getDefaultInstance().getSrcChecksum(); + onChanged(); + return this; + } + /** + * required string src_checksum = 6; + */ + public Builder setSrcChecksumBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + srcChecksum_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:VersionInfo) + } + + static { + defaultInstance = new VersionInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:VersionInfo) + } + public interface ConnectionHeaderOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -789,6 +2179,20 @@ public final class RPCProtos { */ com.google.protobuf.ByteString getCellBlockCompressorClassBytes(); + + // optional .VersionInfo version_info = 5; + /** + * optional .VersionInfo version_info = 5; + */ + boolean hasVersionInfo(); + /** + * optional .VersionInfo version_info = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo getVersionInfo(); + /** + * optional .VersionInfo version_info = 5; + */ + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfoOrBuilder getVersionInfoOrBuilder(); } /** * Protobuf type {@code ConnectionHeader} @@ -873,6 +2277,19 @@ public final class RPCProtos { cellBlockCompressorClass_ = input.readBytes(); break; } + case 42: { + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000010) == 0x00000010)) { + subBuilder = versionInfo_.toBuilder(); + } + versionInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(versionInfo_); + versionInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000010; + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -1094,11 +2511,34 @@ public final class RPCProtos { } } + // optional .VersionInfo version_info = 5; + public static final int VERSION_INFO_FIELD_NUMBER = 5; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo versionInfo_; + /** + * optional .VersionInfo version_info = 5; + */ + public boolean hasVersionInfo() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .VersionInfo version_info = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo getVersionInfo() { + return versionInfo_; + } + /** + * optional .VersionInfo version_info = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfoOrBuilder getVersionInfoOrBuilder() { + return versionInfo_; + } + private void 
initFields() { userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance(); serviceName_ = ""; cellBlockCodecClass_ = ""; cellBlockCompressorClass_ = ""; + versionInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.getDefaultInstance(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -1111,6 +2551,12 @@ public final class RPCProtos { return false; } } + if (hasVersionInfo()) { + if (!getVersionInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -1130,6 +2576,9 @@ public final class RPCProtos { if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeBytes(4, getCellBlockCompressorClassBytes()); } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeMessage(5, versionInfo_); + } getUnknownFields().writeTo(output); } @@ -1155,6 +2604,10 @@ public final class RPCProtos { size += com.google.protobuf.CodedOutputStream .computeBytesSize(4, getCellBlockCompressorClassBytes()); } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(5, versionInfo_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -1198,6 +2651,11 @@ public final class RPCProtos { result = result && getCellBlockCompressorClass() .equals(other.getCellBlockCompressorClass()); } + result = result && (hasVersionInfo() == other.hasVersionInfo()); + if (hasVersionInfo()) { + result = result && getVersionInfo() + .equals(other.getVersionInfo()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -1227,6 +2685,10 @@ public final class RPCProtos { hash = (37 * hash) + CELL_BLOCK_COMPRESSOR_CLASS_FIELD_NUMBER; hash = (53 * hash) + getCellBlockCompressorClass().hashCode(); } + if (hasVersionInfo()) { + hash = (37 * hash) + VERSION_INFO_FIELD_NUMBER; + hash = (53 * hash) + getVersionInfo().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -1333,6 +2795,7 @@ public final class RPCProtos { private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getUserInfoFieldBuilder(); + getVersionInfoFieldBuilder(); } } private static Builder create() { @@ -1353,6 +2816,12 @@ public final class RPCProtos { bitField0_ = (bitField0_ & ~0x00000004); cellBlockCompressorClass_ = ""; bitField0_ = (bitField0_ & ~0x00000008); + if (versionInfoBuilder_ == null) { + versionInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.getDefaultInstance(); + } else { + versionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); return this; } @@ -1401,6 +2870,14 @@ public final class RPCProtos { to_bitField0_ |= 0x00000008; } result.cellBlockCompressorClass_ = cellBlockCompressorClass_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + if (versionInfoBuilder_ == null) { + result.versionInfo_ = versionInfo_; + } else { + result.versionInfo_ = versionInfoBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -1435,6 +2912,9 @@ public final class RPCProtos { cellBlockCompressorClass_ = other.cellBlockCompressorClass_; onChanged(); } + if (other.hasVersionInfo()) { + mergeVersionInfo(other.getVersionInfo()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -1446,6 +2926,12 @@ public final 
class RPCProtos { return false; } } + if (hasVersionInfo()) { + if (!getVersionInfo().isInitialized()) { + + return false; + } + } return true; } @@ -1867,6 +3353,123 @@ public final class RPCProtos { return this; } + // optional .VersionInfo version_info = 5; + private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo versionInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfoOrBuilder> versionInfoBuilder_; + /** + * optional .VersionInfo version_info = 5; + */ + public boolean hasVersionInfo() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional .VersionInfo version_info = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo getVersionInfo() { + if (versionInfoBuilder_ == null) { + return versionInfo_; + } else { + return versionInfoBuilder_.getMessage(); + } + } + /** + * optional .VersionInfo version_info = 5; + */ + public Builder setVersionInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo value) { + if (versionInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + versionInfo_ = value; + onChanged(); + } else { + versionInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .VersionInfo version_info = 5; + */ + public Builder setVersionInfo( + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder builderForValue) { + if (versionInfoBuilder_ == null) { + versionInfo_ = builderForValue.build(); + onChanged(); + } else { + versionInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .VersionInfo version_info = 5; + */ + public Builder mergeVersionInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo value) { + if (versionInfoBuilder_ == null) { + if (((bitField0_ & 0x00000010) == 0x00000010) && + versionInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.getDefaultInstance()) { + versionInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.newBuilder(versionInfo_).mergeFrom(value).buildPartial(); + } else { + versionInfo_ = value; + } + onChanged(); + } else { + versionInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000010; + return this; + } + /** + * optional .VersionInfo version_info = 5; + */ + public Builder clearVersionInfo() { + if (versionInfoBuilder_ == null) { + versionInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.getDefaultInstance(); + onChanged(); + } else { + versionInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000010); + return this; + } + /** + * optional .VersionInfo version_info = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder getVersionInfoBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getVersionInfoFieldBuilder().getBuilder(); + } + /** + * optional .VersionInfo version_info = 5; + */ + public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfoOrBuilder getVersionInfoOrBuilder() { + if (versionInfoBuilder_ != null) { + return versionInfoBuilder_.getMessageOrBuilder(); + } else { + return versionInfo_; + } + } + /** + * optional .VersionInfo 
version_info = 5; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfoOrBuilder> + getVersionInfoFieldBuilder() { + if (versionInfoBuilder_ == null) { + versionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfoOrBuilder>( + versionInfo_, + getParentForChildren(), + isClean()); + versionInfo_ = null; + } + return versionInfoBuilder_; + } + // @@protoc_insertion_point(builder_scope:ConnectionHeader) } @@ -5883,6 +7486,11 @@ public final class RPCProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_UserInformation_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_VersionInfo_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_VersionInfo_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_ConnectionHeader_descriptor; private static @@ -5919,23 +7527,27 @@ public final class RPCProtos { java.lang.String[] descriptorData = { "\n\tRPC.proto\032\rTracing.proto\032\013HBase.proto\"" + "<\n\017UserInformation\022\026\n\016effective_user\030\001 \002" + - "(\t\022\021\n\treal_user\030\002 \001(\t\"\222\001\n\020ConnectionHead" + - "er\022#\n\tuser_info\030\001 \001(\0132\020.UserInformation\022" + - "\024\n\014service_name\030\002 \001(\t\022\036\n\026cell_block_code" + - "c_class\030\003 \001(\t\022#\n\033cell_block_compressor_c" + - "lass\030\004 \001(\t\"\037\n\rCellBlockMeta\022\016\n\006length\030\001 " + - "\001(\r\"|\n\021ExceptionResponse\022\034\n\024exception_cl" + - "ass_name\030\001 \001(\t\022\023\n\013stack_trace\030\002 \001(\t\022\020\n\010h" + - "ostname\030\003 \001(\t\022\014\n\004port\030\004 \001(\005\022\024\n\014do_not_re", - "try\030\005 \001(\010\"\246\001\n\rRequestHeader\022\017\n\007call_id\030\001" + - " \001(\r\022\035\n\ntrace_info\030\002 \001(\0132\t.RPCTInfo\022\023\n\013m" + - "ethod_name\030\003 \001(\t\022\025\n\rrequest_param\030\004 \001(\010\022" + - "\'\n\017cell_block_meta\030\005 \001(\0132\016.CellBlockMeta" + - "\022\020\n\010priority\030\006 \001(\r\"q\n\016ResponseHeader\022\017\n\007" + - "call_id\030\001 \001(\r\022%\n\texception\030\002 \001(\0132\022.Excep" + - "tionResponse\022\'\n\017cell_block_meta\030\003 \001(\0132\016." 
+ - "CellBlockMetaB<\n*org.apache.hadoop.hbase" + - ".protobuf.generatedB\tRPCProtosH\001\240\001\001" + "(\t\022\021\n\treal_user\030\002 \001(\t\"o\n\013VersionInfo\022\017\n\007" + + "version\030\001 \002(\t\022\013\n\003url\030\002 \002(\t\022\020\n\010revision\030\003" + + " \002(\t\022\014\n\004user\030\004 \002(\t\022\014\n\004date\030\005 \002(\t\022\024\n\014src_" + + "checksum\030\006 \002(\t\"\266\001\n\020ConnectionHeader\022#\n\tu" + + "ser_info\030\001 \001(\0132\020.UserInformation\022\024\n\014serv" + + "ice_name\030\002 \001(\t\022\036\n\026cell_block_codec_class" + + "\030\003 \001(\t\022#\n\033cell_block_compressor_class\030\004 " + + "\001(\t\022\"\n\014version_info\030\005 \001(\0132\014.VersionInfo\"", + "\037\n\rCellBlockMeta\022\016\n\006length\030\001 \001(\r\"|\n\021Exce" + + "ptionResponse\022\034\n\024exception_class_name\030\001 " + + "\001(\t\022\023\n\013stack_trace\030\002 \001(\t\022\020\n\010hostname\030\003 \001" + + "(\t\022\014\n\004port\030\004 \001(\005\022\024\n\014do_not_retry\030\005 \001(\010\"\246" + + "\001\n\rRequestHeader\022\017\n\007call_id\030\001 \001(\r\022\035\n\ntra" + + "ce_info\030\002 \001(\0132\t.RPCTInfo\022\023\n\013method_name\030" + + "\003 \001(\t\022\025\n\rrequest_param\030\004 \001(\010\022\'\n\017cell_blo" + + "ck_meta\030\005 \001(\0132\016.CellBlockMeta\022\020\n\010priorit" + + "y\030\006 \001(\r\"q\n\016ResponseHeader\022\017\n\007call_id\030\001 \001" + + "(\r\022%\n\texception\030\002 \001(\0132\022.ExceptionRespons", + "e\022\'\n\017cell_block_meta\030\003 \001(\0132\016.CellBlockMe" + + "taB<\n*org.apache.hadoop.hbase.protobuf.g" + + "eneratedB\tRPCProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -5948,32 +7560,38 @@ public final class RPCProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_UserInformation_descriptor, new java.lang.String[] { "EffectiveUser", "RealUser", }); - internal_static_ConnectionHeader_descriptor = + internal_static_VersionInfo_descriptor = getDescriptor().getMessageTypes().get(1); + internal_static_VersionInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_VersionInfo_descriptor, + new java.lang.String[] { "Version", "Url", "Revision", "User", "Date", "SrcChecksum", }); + internal_static_ConnectionHeader_descriptor = + getDescriptor().getMessageTypes().get(2); internal_static_ConnectionHeader_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ConnectionHeader_descriptor, - new java.lang.String[] { "UserInfo", "ServiceName", "CellBlockCodecClass", "CellBlockCompressorClass", }); + new java.lang.String[] { "UserInfo", "ServiceName", "CellBlockCodecClass", "CellBlockCompressorClass", "VersionInfo", }); internal_static_CellBlockMeta_descriptor = - getDescriptor().getMessageTypes().get(2); + getDescriptor().getMessageTypes().get(3); internal_static_CellBlockMeta_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_CellBlockMeta_descriptor, new java.lang.String[] { "Length", }); internal_static_ExceptionResponse_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(4); internal_static_ExceptionResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ExceptionResponse_descriptor, new 
java.lang.String[] { "ExceptionClassName", "StackTrace", "Hostname", "Port", "DoNotRetry", }); internal_static_RequestHeader_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(5); internal_static_RequestHeader_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RequestHeader_descriptor, new java.lang.String[] { "CallId", "TraceInfo", "MethodName", "RequestParam", "CellBlockMeta", "Priority", }); internal_static_ResponseHeader_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(6); internal_static_ResponseHeader_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ResponseHeader_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java index ec169d54f2f..a8cd58a50f4 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java @@ -64,6 +64,33 @@ public final class RegionServerStatusProtos { * */ long getServerCurrentTime(); + + // optional string use_this_hostname_instead = 4; + /** + * optional string use_this_hostname_instead = 4; + * + *
      +     ** hostname for region server, optional 
      +     * 
      + */ + boolean hasUseThisHostnameInstead(); + /** + * optional string use_this_hostname_instead = 4; + * + *
      +     ** hostname for region server, optional 
      +     * 
      + */ + java.lang.String getUseThisHostnameInstead(); + /** + * optional string use_this_hostname_instead = 4; + * + *
      +     ** hostname for region server, optional 
      +     * 
      + */ + com.google.protobuf.ByteString + getUseThisHostnameInsteadBytes(); } /** * Protobuf type {@code RegionServerStartupRequest} @@ -131,6 +158,11 @@ public final class RegionServerStatusProtos { serverCurrentTime_ = input.readUInt64(); break; } + case 34: { + bitField0_ |= 0x00000008; + useThisHostnameInstead_ = input.readBytes(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -243,10 +275,66 @@ public final class RegionServerStatusProtos { return serverCurrentTime_; } + // optional string use_this_hostname_instead = 4; + public static final int USE_THIS_HOSTNAME_INSTEAD_FIELD_NUMBER = 4; + private java.lang.Object useThisHostnameInstead_; + /** + * optional string use_this_hostname_instead = 4; + * + *
      +     ** hostname for region server, optional 
      +     * 
      + */ + public boolean hasUseThisHostnameInstead() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional string use_this_hostname_instead = 4; + * + *
      +     ** hostname for region server, optional 
      +     * 
      + */ + public java.lang.String getUseThisHostnameInstead() { + java.lang.Object ref = useThisHostnameInstead_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + useThisHostnameInstead_ = s; + } + return s; + } + } + /** + * optional string use_this_hostname_instead = 4; + * + *
      +     ** hostname for region server, optional 
      +     * 
      + */ + public com.google.protobuf.ByteString + getUseThisHostnameInsteadBytes() { + java.lang.Object ref = useThisHostnameInstead_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + useThisHostnameInstead_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + private void initFields() { port_ = 0; serverStartCode_ = 0L; serverCurrentTime_ = 0L; + useThisHostnameInstead_ = ""; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -281,6 +369,9 @@ public final class RegionServerStatusProtos { if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeUInt64(3, serverCurrentTime_); } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(4, getUseThisHostnameInsteadBytes()); + } getUnknownFields().writeTo(output); } @@ -302,6 +393,10 @@ public final class RegionServerStatusProtos { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(3, serverCurrentTime_); } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, getUseThisHostnameInsteadBytes()); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -340,6 +435,11 @@ public final class RegionServerStatusProtos { result = result && (getServerCurrentTime() == other.getServerCurrentTime()); } + result = result && (hasUseThisHostnameInstead() == other.hasUseThisHostnameInstead()); + if (hasUseThisHostnameInstead()) { + result = result && getUseThisHostnameInstead() + .equals(other.getUseThisHostnameInstead()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -365,6 +465,10 @@ public final class RegionServerStatusProtos { hash = (37 * hash) + SERVER_CURRENT_TIME_FIELD_NUMBER; hash = (53 * hash) + hashLong(getServerCurrentTime()); } + if (hasUseThisHostnameInstead()) { + hash = (37 * hash) + USE_THIS_HOSTNAME_INSTEAD_FIELD_NUMBER; + hash = (53 * hash) + getUseThisHostnameInstead().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -480,6 +584,8 @@ public final class RegionServerStatusProtos { bitField0_ = (bitField0_ & ~0x00000002); serverCurrentTime_ = 0L; bitField0_ = (bitField0_ & ~0x00000004); + useThisHostnameInstead_ = ""; + bitField0_ = (bitField0_ & ~0x00000008); return this; } @@ -520,6 +626,10 @@ public final class RegionServerStatusProtos { to_bitField0_ |= 0x00000004; } result.serverCurrentTime_ = serverCurrentTime_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.useThisHostnameInstead_ = useThisHostnameInstead_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -545,6 +655,11 @@ public final class RegionServerStatusProtos { if (other.hasServerCurrentTime()) { setServerCurrentTime(other.getServerCurrentTime()); } + if (other.hasUseThisHostnameInstead()) { + bitField0_ |= 0x00000008; + useThisHostnameInstead_ = other.useThisHostnameInstead_; + onChanged(); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -731,6 +846,104 @@ public final class RegionServerStatusProtos { return this; } + // optional string use_this_hostname_instead = 4; + private java.lang.Object useThisHostnameInstead_ = ""; + /** + * optional string use_this_hostname_instead = 4; + * + *
      +       ** hostname for region server, optional 
      +       * 
      + */ + public boolean hasUseThisHostnameInstead() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * optional string use_this_hostname_instead = 4; + * + *
      +       ** hostname for region server, optional 
      +       * 
      + */ + public java.lang.String getUseThisHostnameInstead() { + java.lang.Object ref = useThisHostnameInstead_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + useThisHostnameInstead_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string use_this_hostname_instead = 4; + * + *
      +       ** hostname for region server, optional 
      +       * 
      + */ + public com.google.protobuf.ByteString + getUseThisHostnameInsteadBytes() { + java.lang.Object ref = useThisHostnameInstead_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + useThisHostnameInstead_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string use_this_hostname_instead = 4; + * + *
      +       ** hostname for region server, optional 
      +       * 
      + */ + public Builder setUseThisHostnameInstead( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + useThisHostnameInstead_ = value; + onChanged(); + return this; + } + /** + * optional string use_this_hostname_instead = 4; + * + *
      +       ** hostname for region server, optional 
      +       * 
      + */ + public Builder clearUseThisHostnameInstead() { + bitField0_ = (bitField0_ & ~0x00000008); + useThisHostnameInstead_ = getDefaultInstance().getUseThisHostnameInstead(); + onChanged(); + return this; + } + /** + * optional string use_this_hostname_instead = 4; + * + *
      +       ** hostname for region server, optional 
      +       * 
      + */ + public Builder setUseThisHostnameInsteadBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000008; + useThisHostnameInstead_ = value; + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:RegionServerStartupRequest) } @@ -4496,7 +4709,7 @@ public final class RegionServerStatusProtos { * required uint64 last_flushed_sequence_id = 1; * *
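As a rough usage sketch (not part of the generated sources), the new optional use_this_hostname_instead field can be exercised through the builder methods added above. The port, timestamps, and hostname below are purely illustrative, and the setters for the pre-existing required fields are assumed to be the standard protobuf-generated ones.

    import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;

    public class StartupRequestSketch {
      public static void main(String[] args) {
        // Ask the master to register this region server under an explicit hostname
        // rather than the address it resolves from the connection.
        RegionServerStartupRequest request = RegionServerStartupRequest.newBuilder()
            .setPort(60020)                                   // required uint32 port = 1
            .setServerStartCode(System.currentTimeMillis())   // required uint64 server_start_code = 2
            .setServerCurrentTime(System.currentTimeMillis()) // required uint64 server_current_time = 3
            .setUseThisHostnameInstead("rs1.example.com")     // new optional string field 4
            .build();
        System.out.println(request.hasUseThisHostnameInstead()); // true
      }
    }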
      -     * the last WAL sequence id flushed from MemStore to HFile for the region 
      +     ** the last WAL sequence id flushed from MemStore to HFile for the region 
            * 
      */ boolean hasLastFlushedSequenceId(); @@ -4504,10 +4717,55 @@ public final class RegionServerStatusProtos { * required uint64 last_flushed_sequence_id = 1; * *
      -     * the last WAL sequence id flushed from MemStore to HFile for the region 
      +     ** the last WAL sequence id flushed from MemStore to HFile for the region 
            * 
      */ long getLastFlushedSequenceId(); + + // repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +     * 
      + */ + java.util.List + getStoreLastFlushedSequenceIdList(); + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +     * 
      + */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreLastFlushedSequenceId(int index); + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +     * 
      + */ + int getStoreLastFlushedSequenceIdCount(); + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +     * 
      + */ + java.util.List + getStoreLastFlushedSequenceIdOrBuilderList(); + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +     * 
      + */ + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreLastFlushedSequenceIdOrBuilder( + int index); } /** * Protobuf type {@code GetLastFlushedSequenceIdResponse} @@ -4565,6 +4823,14 @@ public final class RegionServerStatusProtos { lastFlushedSequenceId_ = input.readUInt64(); break; } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + storeLastFlushedSequenceId_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + storeLastFlushedSequenceId_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.PARSER, extensionRegistry)); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -4573,6 +4839,9 @@ public final class RegionServerStatusProtos { throw new com.google.protobuf.InvalidProtocolBufferException( e.getMessage()).setUnfinishedMessage(this); } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + storeLastFlushedSequenceId_ = java.util.Collections.unmodifiableList(storeLastFlushedSequenceId_); + } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); } @@ -4612,7 +4881,7 @@ public final class RegionServerStatusProtos { * required uint64 last_flushed_sequence_id = 1; * *
      -     * the last WAL sequence id flushed from MemStore to HFile for the region 
      +     ** the last WAL sequence id flushed from MemStore to HFile for the region 
            * 
      */ public boolean hasLastFlushedSequenceId() { @@ -4622,15 +4891,72 @@ public final class RegionServerStatusProtos { * required uint64 last_flushed_sequence_id = 1; * *
      -     * the last WAL sequence id flushed from MemStore to HFile for the region 
      +     ** the last WAL sequence id flushed from MemStore to HFile for the region 
            * 
      */ public long getLastFlushedSequenceId() { return lastFlushedSequenceId_; } + // repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + public static final int STORE_LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER = 2; + private java.util.List storeLastFlushedSequenceId_; + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +     * 
      + */ + public java.util.List getStoreLastFlushedSequenceIdList() { + return storeLastFlushedSequenceId_; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +     * 
      + */ + public java.util.List + getStoreLastFlushedSequenceIdOrBuilderList() { + return storeLastFlushedSequenceId_; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +     * 
      + */ + public int getStoreLastFlushedSequenceIdCount() { + return storeLastFlushedSequenceId_.size(); + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +     * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreLastFlushedSequenceId(int index) { + return storeLastFlushedSequenceId_.get(index); + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +     ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +     * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreLastFlushedSequenceIdOrBuilder( + int index) { + return storeLastFlushedSequenceId_.get(index); + } + private void initFields() { lastFlushedSequenceId_ = 0L; + storeLastFlushedSequenceId_ = java.util.Collections.emptyList(); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -4641,6 +4967,12 @@ public final class RegionServerStatusProtos { memoizedIsInitialized = 0; return false; } + for (int i = 0; i < getStoreLastFlushedSequenceIdCount(); i++) { + if (!getStoreLastFlushedSequenceId(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } memoizedIsInitialized = 1; return true; } @@ -4651,6 +4983,9 @@ public final class RegionServerStatusProtos { if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeUInt64(1, lastFlushedSequenceId_); } + for (int i = 0; i < storeLastFlushedSequenceId_.size(); i++) { + output.writeMessage(2, storeLastFlushedSequenceId_.get(i)); + } getUnknownFields().writeTo(output); } @@ -4664,6 +4999,10 @@ public final class RegionServerStatusProtos { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(1, lastFlushedSequenceId_); } + for (int i = 0; i < storeLastFlushedSequenceId_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, storeLastFlushedSequenceId_.get(i)); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -4692,6 +5031,8 @@ public final class RegionServerStatusProtos { result = result && (getLastFlushedSequenceId() == other.getLastFlushedSequenceId()); } + result = result && getStoreLastFlushedSequenceIdList() + .equals(other.getStoreLastFlushedSequenceIdList()); result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -4709,6 +5050,10 @@ public final class RegionServerStatusProtos { hash = (37 * hash) + LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER; hash = (53 * hash) + hashLong(getLastFlushedSequenceId()); } + if (getStoreLastFlushedSequenceIdCount() > 0) { + hash = (37 * hash) + STORE_LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER; + hash = (53 * hash) + getStoreLastFlushedSequenceIdList().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -4810,6 +5155,7 @@ public final class RegionServerStatusProtos { } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getStoreLastFlushedSequenceIdFieldBuilder(); } } private static Builder create() { @@ -4820,6 +5166,12 @@ public final class RegionServerStatusProtos { super.clear(); lastFlushedSequenceId_ = 0L; bitField0_ = (bitField0_ & ~0x00000001); + if (storeLastFlushedSequenceIdBuilder_ == null) { + storeLastFlushedSequenceId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + storeLastFlushedSequenceIdBuilder_.clear(); + } return this; } @@ -4852,6 +5204,15 @@ public final class RegionServerStatusProtos { to_bitField0_ |= 0x00000001; } result.lastFlushedSequenceId_ = lastFlushedSequenceId_; + if (storeLastFlushedSequenceIdBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + storeLastFlushedSequenceId_ = java.util.Collections.unmodifiableList(storeLastFlushedSequenceId_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.storeLastFlushedSequenceId_ = storeLastFlushedSequenceId_; + } else { + result.storeLastFlushedSequenceId_ = 
storeLastFlushedSequenceIdBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -4871,6 +5232,32 @@ public final class RegionServerStatusProtos { if (other.hasLastFlushedSequenceId()) { setLastFlushedSequenceId(other.getLastFlushedSequenceId()); } + if (storeLastFlushedSequenceIdBuilder_ == null) { + if (!other.storeLastFlushedSequenceId_.isEmpty()) { + if (storeLastFlushedSequenceId_.isEmpty()) { + storeLastFlushedSequenceId_ = other.storeLastFlushedSequenceId_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureStoreLastFlushedSequenceIdIsMutable(); + storeLastFlushedSequenceId_.addAll(other.storeLastFlushedSequenceId_); + } + onChanged(); + } + } else { + if (!other.storeLastFlushedSequenceId_.isEmpty()) { + if (storeLastFlushedSequenceIdBuilder_.isEmpty()) { + storeLastFlushedSequenceIdBuilder_.dispose(); + storeLastFlushedSequenceIdBuilder_ = null; + storeLastFlushedSequenceId_ = other.storeLastFlushedSequenceId_; + bitField0_ = (bitField0_ & ~0x00000002); + storeLastFlushedSequenceIdBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getStoreLastFlushedSequenceIdFieldBuilder() : null; + } else { + storeLastFlushedSequenceIdBuilder_.addAllMessages(other.storeLastFlushedSequenceId_); + } + } + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -4880,6 +5267,12 @@ public final class RegionServerStatusProtos { return false; } + for (int i = 0; i < getStoreLastFlushedSequenceIdCount(); i++) { + if (!getStoreLastFlushedSequenceId(i).isInitialized()) { + + return false; + } + } return true; } @@ -4908,7 +5301,7 @@ public final class RegionServerStatusProtos { * required uint64 last_flushed_sequence_id = 1; * *
      -       * the last WAL sequence id flushed from MemStore to HFile for the region 
      +       ** the last WAL sequence id flushed from MemStore to HFile for the region 
              * 
      */ public boolean hasLastFlushedSequenceId() { @@ -4918,7 +5311,7 @@ public final class RegionServerStatusProtos { * required uint64 last_flushed_sequence_id = 1; * *
      -       * the last WAL sequence id flushed from MemStore to HFile for the region 
      +       ** the last WAL sequence id flushed from MemStore to HFile for the region 
              * 
      */ public long getLastFlushedSequenceId() { @@ -4928,7 +5321,7 @@ public final class RegionServerStatusProtos { * required uint64 last_flushed_sequence_id = 1; * *
      -       * the last WAL sequence id flushed from MemStore to HFile for the region 
      +       ** the last WAL sequence id flushed from MemStore to HFile for the region 
              * 
      */ public Builder setLastFlushedSequenceId(long value) { @@ -4941,7 +5334,7 @@ public final class RegionServerStatusProtos { * required uint64 last_flushed_sequence_id = 1; * *
      -       * the last WAL sequence id flushed from MemStore to HFile for the region 
      +       ** the last WAL sequence id flushed from MemStore to HFile for the region 
              * 
      */ public Builder clearLastFlushedSequenceId() { @@ -4951,6 +5344,318 @@ public final class RegionServerStatusProtos { return this; } + // repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + private java.util.List storeLastFlushedSequenceId_ = + java.util.Collections.emptyList(); + private void ensureStoreLastFlushedSequenceIdIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + storeLastFlushedSequenceId_ = new java.util.ArrayList(storeLastFlushedSequenceId_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> storeLastFlushedSequenceIdBuilder_; + + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public java.util.List getStoreLastFlushedSequenceIdList() { + if (storeLastFlushedSequenceIdBuilder_ == null) { + return java.util.Collections.unmodifiableList(storeLastFlushedSequenceId_); + } else { + return storeLastFlushedSequenceIdBuilder_.getMessageList(); + } + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public int getStoreLastFlushedSequenceIdCount() { + if (storeLastFlushedSequenceIdBuilder_ == null) { + return storeLastFlushedSequenceId_.size(); + } else { + return storeLastFlushedSequenceIdBuilder_.getCount(); + } + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreLastFlushedSequenceId(int index) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + return storeLastFlushedSequenceId_.get(index); + } else { + return storeLastFlushedSequenceIdBuilder_.getMessage(index); + } + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public Builder setStoreLastFlushedSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreLastFlushedSequenceIdIsMutable(); + storeLastFlushedSequenceId_.set(index, value); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public Builder setStoreLastFlushedSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + ensureStoreLastFlushedSequenceIdIsMutable(); + storeLastFlushedSequenceId_.set(index, builderForValue.build()); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public Builder addStoreLastFlushedSequenceId(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreLastFlushedSequenceIdIsMutable(); + storeLastFlushedSequenceId_.add(value); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public Builder addStoreLastFlushedSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStoreLastFlushedSequenceIdIsMutable(); + storeLastFlushedSequenceId_.add(index, value); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public Builder addStoreLastFlushedSequenceId( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + ensureStoreLastFlushedSequenceIdIsMutable(); + storeLastFlushedSequenceId_.add(builderForValue.build()); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public Builder addStoreLastFlushedSequenceId( + int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + ensureStoreLastFlushedSequenceIdIsMutable(); + storeLastFlushedSequenceId_.add(index, builderForValue.build()); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public Builder addAllStoreLastFlushedSequenceId( + java.lang.Iterable values) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + ensureStoreLastFlushedSequenceIdIsMutable(); + super.addAll(values, storeLastFlushedSequenceId_); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public Builder clearStoreLastFlushedSequenceId() { + if (storeLastFlushedSequenceIdBuilder_ == null) { + storeLastFlushedSequenceId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.clear(); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public Builder removeStoreLastFlushedSequenceId(int index) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + ensureStoreLastFlushedSequenceIdIsMutable(); + storeLastFlushedSequenceId_.remove(index); + onChanged(); + } else { + storeLastFlushedSequenceIdBuilder_.remove(index); + } + return this; + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder getStoreLastFlushedSequenceIdBuilder( + int index) { + return getStoreLastFlushedSequenceIdFieldBuilder().getBuilder(index); + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreLastFlushedSequenceIdOrBuilder( + int index) { + if (storeLastFlushedSequenceIdBuilder_ == null) { + return storeLastFlushedSequenceId_.get(index); } else { + return storeLastFlushedSequenceIdBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public java.util.List + getStoreLastFlushedSequenceIdOrBuilderList() { + if (storeLastFlushedSequenceIdBuilder_ != null) { + return storeLastFlushedSequenceIdBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(storeLastFlushedSequenceId_); + } + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreLastFlushedSequenceIdBuilder() { + return getStoreLastFlushedSequenceIdFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance()); + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreLastFlushedSequenceIdBuilder( + int index) { + return getStoreLastFlushedSequenceIdFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance()); + } + /** + * repeated .StoreSequenceId store_last_flushed_sequence_id = 2; + * + *
      +       ** the last WAL sequence id flushed from MemStore to HFile for stores of the region 
      +       * 
      + */ + public java.util.List + getStoreLastFlushedSequenceIdBuilderList() { + return getStoreLastFlushedSequenceIdFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> + getStoreLastFlushedSequenceIdFieldBuilder() { + if (storeLastFlushedSequenceIdBuilder_ == null) { + storeLastFlushedSequenceIdBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>( + storeLastFlushedSequenceId_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + storeLastFlushedSequenceId_ = null; + } + return storeLastFlushedSequenceIdBuilder_; + } + // @@protoc_insertion_point(builder_scope:GetLastFlushedSequenceIdResponse) } @@ -8399,47 +9104,49 @@ public final class RegionServerStatusProtos { static { java.lang.String[] descriptorData = { "\n\030RegionServerStatus.proto\032\013HBase.proto\032" + - "\023ClusterStatus.proto\"b\n\032RegionServerStar" + - "tupRequest\022\014\n\004port\030\001 \002(\r\022\031\n\021server_start" + - "_code\030\002 \002(\004\022\033\n\023server_current_time\030\003 \002(\004" + - "\"C\n\033RegionServerStartupResponse\022$\n\013map_e" + - "ntries\030\001 \003(\0132\017.NameStringPair\"S\n\031RegionS" + - "erverReportRequest\022\033\n\006server\030\001 \002(\0132\013.Ser" + - "verName\022\031\n\004load\030\002 \001(\0132\013.ServerLoad\"\034\n\032Re" + - "gionServerReportResponse\"O\n\031ReportRSFata" + - "lErrorRequest\022\033\n\006server\030\001 \002(\0132\013.ServerNa", - "me\022\025\n\rerror_message\030\002 \002(\t\"\034\n\032ReportRSFat" + - "alErrorResponse\"6\n\037GetLastFlushedSequenc" + - "eIdRequest\022\023\n\013region_name\030\001 \002(\014\"D\n GetLa" + - "stFlushedSequenceIdResponse\022 \n\030last_flus" + - "hed_sequence_id\030\001 \002(\004\"\322\002\n\025RegionStateTra" + - "nsition\022>\n\017transition_code\030\001 \002(\0162%.Regio" + - "nStateTransition.TransitionCode\022 \n\013regio" + - "n_info\030\002 \003(\0132\013.RegionInfo\022\024\n\014open_seq_nu" + - "m\030\003 \001(\004\"\300\001\n\016TransitionCode\022\n\n\006OPENED\020\000\022\017" + - "\n\013FAILED_OPEN\020\001\022\n\n\006CLOSED\020\002\022\022\n\016READY_TO_", - "SPLIT\020\003\022\022\n\016READY_TO_MERGE\020\004\022\016\n\nSPLIT_PON" + - "R\020\005\022\016\n\nMERGE_PONR\020\006\022\t\n\005SPLIT\020\007\022\n\n\006MERGED" + - "\020\010\022\022\n\016SPLIT_REVERTED\020\t\022\022\n\016MERGE_REVERTED" + - "\020\n\"m\n\"ReportRegionStateTransitionRequest" + - "\022\033\n\006server\030\001 \002(\0132\013.ServerName\022*\n\ntransit" + - "ion\030\002 \003(\0132\026.RegionStateTransition\"<\n#Rep" + - "ortRegionStateTransitionResponse\022\025\n\rerro" + - "r_message\030\001 \001(\t2\326\003\n\031RegionServerStatusSe" + - "rvice\022P\n\023RegionServerStartup\022\033.RegionSer" + - "verStartupRequest\032\034.RegionServerStartupR", - "esponse\022M\n\022RegionServerReport\022\032.RegionSe" + - "rverReportRequest\032\033.RegionServerReportRe" + - "sponse\022M\n\022ReportRSFatalError\022\032.ReportRSF" + - 
"atalErrorRequest\032\033.ReportRSFatalErrorRes" + - "ponse\022_\n\030GetLastFlushedSequenceId\022 .GetL" + - "astFlushedSequenceIdRequest\032!.GetLastFlu" + - "shedSequenceIdResponse\022h\n\033ReportRegionSt" + - "ateTransition\022#.ReportRegionStateTransit" + - "ionRequest\032$.ReportRegionStateTransition" + - "ResponseBN\n*org.apache.hadoop.hbase.prot", - "obuf.generatedB\030RegionServerStatusProtos" + - "H\001\210\001\001\240\001\001" + "\023ClusterStatus.proto\"\205\001\n\032RegionServerSta" + + "rtupRequest\022\014\n\004port\030\001 \002(\r\022\031\n\021server_star" + + "t_code\030\002 \002(\004\022\033\n\023server_current_time\030\003 \002(" + + "\004\022!\n\031use_this_hostname_instead\030\004 \001(\t\"C\n\033" + + "RegionServerStartupResponse\022$\n\013map_entri" + + "es\030\001 \003(\0132\017.NameStringPair\"S\n\031RegionServe" + + "rReportRequest\022\033\n\006server\030\001 \002(\0132\013.ServerN" + + "ame\022\031\n\004load\030\002 \001(\0132\013.ServerLoad\"\034\n\032Region" + + "ServerReportResponse\"O\n\031ReportRSFatalErr", + "orRequest\022\033\n\006server\030\001 \002(\0132\013.ServerName\022\025" + + "\n\rerror_message\030\002 \002(\t\"\034\n\032ReportRSFatalEr" + + "rorResponse\"6\n\037GetLastFlushedSequenceIdR" + + "equest\022\023\n\013region_name\030\001 \002(\014\"~\n GetLastFl" + + "ushedSequenceIdResponse\022 \n\030last_flushed_" + + "sequence_id\030\001 \002(\004\0228\n\036store_last_flushed_" + + "sequence_id\030\002 \003(\0132\020.StoreSequenceId\"\322\002\n\025" + + "RegionStateTransition\022>\n\017transition_code" + + "\030\001 \002(\0162%.RegionStateTransition.Transitio" + + "nCode\022 \n\013region_info\030\002 \003(\0132\013.RegionInfo\022", + "\024\n\014open_seq_num\030\003 \001(\004\"\300\001\n\016TransitionCode" + + "\022\n\n\006OPENED\020\000\022\017\n\013FAILED_OPEN\020\001\022\n\n\006CLOSED\020" + + "\002\022\022\n\016READY_TO_SPLIT\020\003\022\022\n\016READY_TO_MERGE\020" + + "\004\022\016\n\nSPLIT_PONR\020\005\022\016\n\nMERGE_PONR\020\006\022\t\n\005SPL" + + "IT\020\007\022\n\n\006MERGED\020\010\022\022\n\016SPLIT_REVERTED\020\t\022\022\n\016" + + "MERGE_REVERTED\020\n\"m\n\"ReportRegionStateTra" + + "nsitionRequest\022\033\n\006server\030\001 \002(\0132\013.ServerN" + + "ame\022*\n\ntransition\030\002 \003(\0132\026.RegionStateTra" + + "nsition\"<\n#ReportRegionStateTransitionRe" + + "sponse\022\025\n\rerror_message\030\001 \001(\t2\326\003\n\031Region", + "ServerStatusService\022P\n\023RegionServerStart" + + "up\022\033.RegionServerStartupRequest\032\034.Region" + + "ServerStartupResponse\022M\n\022RegionServerRep" + + "ort\022\032.RegionServerReportRequest\032\033.Region" + + "ServerReportResponse\022M\n\022ReportRSFatalErr" + + "or\022\032.ReportRSFatalErrorRequest\032\033.ReportR" + + "SFatalErrorResponse\022_\n\030GetLastFlushedSeq" + + "uenceId\022 .GetLastFlushedSequenceIdReques" + + "t\032!.GetLastFlushedSequenceIdResponse\022h\n\033" + + "ReportRegionStateTransition\022#.ReportRegi", + "onStateTransitionRequest\032$.ReportRegionS" + + "tateTransitionResponseBN\n*org.apache.had" + + "oop.hbase.protobuf.generatedB\030RegionServ" + + "erStatusProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -8451,7 +9158,7 @@ public final class RegionServerStatusProtos { internal_static_RegionServerStartupRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( 
internal_static_RegionServerStartupRequest_descriptor, - new java.lang.String[] { "Port", "ServerStartCode", "ServerCurrentTime", }); + new java.lang.String[] { "Port", "ServerStartCode", "ServerCurrentTime", "UseThisHostnameInstead", }); internal_static_RegionServerStartupResponse_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_RegionServerStartupResponse_fieldAccessorTable = new @@ -8493,7 +9200,7 @@ public final class RegionServerStatusProtos { internal_static_GetLastFlushedSequenceIdResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetLastFlushedSequenceIdResponse_descriptor, - new java.lang.String[] { "LastFlushedSequenceId", }); + new java.lang.String[] { "LastFlushedSequenceId", "StoreLastFlushedSequenceId", }); internal_static_RegionStateTransition_descriptor = getDescriptor().getMessageTypes().get(8); internal_static_RegionStateTransition_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java index c9fa8548063..fa73077498a 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java @@ -5522,6 +5522,24 @@ public final class WALProtos { */ org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptorOrBuilder getStoreFlushesOrBuilder( int index); + + // optional bytes region_name = 6; + /** + * optional bytes region_name = 6; + * + *
      +     * full region name
      +     * 
      + */ + boolean hasRegionName(); + /** + * optional bytes region_name = 6; + * + *
      +     * full region name
      +     * 
      + */ + com.google.protobuf.ByteString getRegionName(); } /** * Protobuf type {@code FlushDescriptor} @@ -5613,6 +5631,11 @@ public final class WALProtos { storeFlushes_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor.PARSER, extensionRegistry)); break; } + case 50: { + bitField0_ |= 0x00000010; + regionName_ = input.readBytes(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -5672,6 +5695,14 @@ public final class WALProtos { * ABORT_FLUSH = 2; */ ABORT_FLUSH(2, 2), + /** + * CANNOT_FLUSH = 3; + * + *
      +       * marker for indicating that a flush has been requested but cannot complete
      +       * 
      + */ + CANNOT_FLUSH(3, 3), ; /** @@ -5686,6 +5717,14 @@ public final class WALProtos { * ABORT_FLUSH = 2; */ public static final int ABORT_FLUSH_VALUE = 2; + /** + * CANNOT_FLUSH = 3; + * + *
      +       * marker for indicating that a flush has been requested but cannot complete
      +       * 
      + */ + public static final int CANNOT_FLUSH_VALUE = 3; public final int getNumber() { return value; } @@ -5695,6 +5734,7 @@ public final class WALProtos { case 0: return START_FLUSH; case 1: return COMMIT_FLUSH; case 2: return ABORT_FLUSH; + case 3: return CANNOT_FLUSH; default: return null; } } @@ -6772,12 +6812,37 @@ public final class WALProtos { return storeFlushes_.get(index); } + // optional bytes region_name = 6; + public static final int REGION_NAME_FIELD_NUMBER = 6; + private com.google.protobuf.ByteString regionName_; + /** + * optional bytes region_name = 6; + * + *
      +     * full region name
      +     * 
      + */ + public boolean hasRegionName() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional bytes region_name = 6; + * + *
      +     * full region name
      +     * 
      + */ + public com.google.protobuf.ByteString getRegionName() { + return regionName_; + } + private void initFields() { action_ = org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction.START_FLUSH; tableName_ = com.google.protobuf.ByteString.EMPTY; encodedRegionName_ = com.google.protobuf.ByteString.EMPTY; flushSequenceNumber_ = 0L; storeFlushes_ = java.util.Collections.emptyList(); + regionName_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -6824,6 +6889,9 @@ public final class WALProtos { for (int i = 0; i < storeFlushes_.size(); i++) { output.writeMessage(5, storeFlushes_.get(i)); } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBytes(6, regionName_); + } getUnknownFields().writeTo(output); } @@ -6853,6 +6921,10 @@ public final class WALProtos { size += com.google.protobuf.CodedOutputStream .computeMessageSize(5, storeFlushes_.get(i)); } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(6, regionName_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -6898,6 +6970,11 @@ public final class WALProtos { } result = result && getStoreFlushesList() .equals(other.getStoreFlushesList()); + result = result && (hasRegionName() == other.hasRegionName()); + if (hasRegionName()) { + result = result && getRegionName() + .equals(other.getRegionName()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -6931,6 +7008,10 @@ public final class WALProtos { hash = (37 * hash) + STORE_FLUSHES_FIELD_NUMBER; hash = (53 * hash) + getStoreFlushesList().hashCode(); } + if (hasRegionName()) { + hash = (37 * hash) + REGION_NAME_FIELD_NUMBER; + hash = (53 * hash) + getRegionName().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -7060,6 +7141,8 @@ public final class WALProtos { } else { storeFlushesBuilder_.clear(); } + regionName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000020); return this; } @@ -7113,6 +7196,10 @@ public final class WALProtos { } else { result.storeFlushes_ = storeFlushesBuilder_.build(); } + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000010; + } + result.regionName_ = regionName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -7167,6 +7254,9 @@ public final class WALProtos { } } } + if (other.hasRegionName()) { + setRegionName(other.getRegionName()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -7593,6 +7683,58 @@ public final class WALProtos { return storeFlushesBuilder_; } + // optional bytes region_name = 6; + private com.google.protobuf.ByteString regionName_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes region_name = 6; + * + *
      +       * full region name
      +       * 
      + */ + public boolean hasRegionName() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional bytes region_name = 6; + * + *
      +       * full region name
      +       * 
      + */ + public com.google.protobuf.ByteString getRegionName() { + return regionName_; + } + /** + * optional bytes region_name = 6; + * + *
      +       * full region name
      +       * 
      + */ + public Builder setRegionName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + regionName_ = value; + onChanged(); + return this; + } + /** + * optional bytes region_name = 6; + * + *
      +       * full region name
      +       * 
      + */ + public Builder clearRegionName() { + bitField0_ = (bitField0_ & ~0x00000020); + regionName_ = getDefaultInstance().getRegionName(); + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:FlushDescriptor) } @@ -9772,6 +9914,24 @@ public final class WALProtos { * */ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder(); + + // optional bytes region_name = 7; + /** + * optional bytes region_name = 7; + * + *
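A minimal, hypothetical sketch of the FlushDescriptor additions above (the new CANNOT_FLUSH action and the optional region_name, field 6); RegionEventDescriptor below gains the same kind of optional region_name as field 7. The table and region names are invented, and beyond the new setters shown above the sketch assumes the standard generated setters for the message's pre-existing required fields.

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
    import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;

    public class FlushMarkerSketch {
      public static void main(String[] args) {
        // A CANNOT_FLUSH marker records that a flush was requested but could not run;
        // the new optional region_name carries the full region name next to the encoded one.
        FlushDescriptor marker = FlushDescriptor.newBuilder()
            .setAction(FlushAction.CANNOT_FLUSH)                                // new enum value 3
            .setTableName(ByteString.copyFromUtf8("t1"))                        // required bytes table_name = 2
            .setEncodedRegionName(ByteString.copyFromUtf8("abcdef0123456789"))  // required bytes encoded_region_name = 3
            .setRegionName(ByteString.copyFromUtf8("t1,,1422000000000.abcdef0123456789."))  // new optional bytes field 6
            .build();
        System.out.println(marker.hasRegionName()); // true
      }
    }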
      +     * full region name
      +     * 
      + */ + boolean hasRegionName(); + /** + * optional bytes region_name = 7; + * + *
      +     * full region name
      +     * 
      + */ + com.google.protobuf.ByteString getRegionName(); } /** * Protobuf type {@code RegionEventDescriptor} @@ -9876,6 +10036,11 @@ public final class WALProtos { bitField0_ |= 0x00000010; break; } + case 58: { + bitField0_ |= 0x00000020; + regionName_ = input.readBytes(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -10135,6 +10300,30 @@ public final class WALProtos { return server_; } + // optional bytes region_name = 7; + public static final int REGION_NAME_FIELD_NUMBER = 7; + private com.google.protobuf.ByteString regionName_; + /** + * optional bytes region_name = 7; + * + *
      +     * full region name
      +     * 
      + */ + public boolean hasRegionName() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional bytes region_name = 7; + * + *
      +     * full region name
      +     * 
      + */ + public com.google.protobuf.ByteString getRegionName() { + return regionName_; + } + private void initFields() { eventType_ = org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType.REGION_OPEN; tableName_ = com.google.protobuf.ByteString.EMPTY; @@ -10142,6 +10331,7 @@ public final class WALProtos { logSequenceNumber_ = 0L; stores_ = java.util.Collections.emptyList(); server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + regionName_ = com.google.protobuf.ByteString.EMPTY; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -10197,6 +10387,9 @@ public final class WALProtos { if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeMessage(6, server_); } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBytes(7, regionName_); + } getUnknownFields().writeTo(output); } @@ -10230,6 +10423,10 @@ public final class WALProtos { size += com.google.protobuf.CodedOutputStream .computeMessageSize(6, server_); } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(7, regionName_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -10280,6 +10477,11 @@ public final class WALProtos { result = result && getServer() .equals(other.getServer()); } + result = result && (hasRegionName() == other.hasRegionName()); + if (hasRegionName()) { + result = result && getRegionName() + .equals(other.getRegionName()); + } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -10317,6 +10519,10 @@ public final class WALProtos { hash = (37 * hash) + SERVER_FIELD_NUMBER; hash = (53 * hash) + getServer().hashCode(); } + if (hasRegionName()) { + hash = (37 * hash) + REGION_NAME_FIELD_NUMBER; + hash = (53 * hash) + getRegionName().hashCode(); + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -10453,6 +10659,8 @@ public final class WALProtos { serverBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000020); + regionName_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000040); return this; } @@ -10514,6 +10722,10 @@ public final class WALProtos { } else { result.server_ = serverBuilder_.build(); } + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000020; + } + result.regionName_ = regionName_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -10571,6 +10783,9 @@ public final class WALProtos { if (other.hasServer()) { mergeServer(other.getServer()); } + if (other.hasRegionName()) { + setRegionName(other.getRegionName()); + } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -11156,6 +11371,58 @@ public final class WALProtos { return serverBuilder_; } + // optional bytes region_name = 7; + private com.google.protobuf.ByteString regionName_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes region_name = 7; + * + *
      +       * full region name
      +       * 
      + */ + public boolean hasRegionName() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * optional bytes region_name = 7; + * + *
      +       * full region name
      +       * 
      + */ + public com.google.protobuf.ByteString getRegionName() { + return regionName_; + } + /** + * optional bytes region_name = 7; + * + *
      +       * full region name
      +       * 
      + */ + public Builder setRegionName(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000040; + regionName_ = value; + onChanged(); + return this; + } + /** + * optional bytes region_name = 7; + * + *
      +       * full region name
      +       * 
      + */ + public Builder clearRegionName() { + bitField0_ = (bitField0_ & ~0x00000040); + regionName_ = getDefaultInstance().getRegionName(); + onChanged(); + return this; + } + // @@protoc_insertion_point(builder_scope:RegionEventDescriptor) } @@ -11598,32 +11865,34 @@ public final class WALProtos { "n_name\030\002 \002(\014\022\023\n\013family_name\030\003 \002(\014\022\030\n\020com" + "paction_input\030\004 \003(\t\022\031\n\021compaction_output" + "\030\005 \003(\t\022\026\n\016store_home_dir\030\006 \002(\t\022\023\n\013region" + - "_name\030\007 \001(\014\"\353\002\n\017FlushDescriptor\022,\n\006actio" + + "_name\030\007 \001(\014\"\222\003\n\017FlushDescriptor\022,\n\006actio" + "n\030\001 \002(\0162\034.FlushDescriptor.FlushAction\022\022\n", "\ntable_name\030\002 \002(\014\022\033\n\023encoded_region_name" + "\030\003 \002(\014\022\035\n\025flush_sequence_number\030\004 \001(\004\022<\n" + "\rstore_flushes\030\005 \003(\0132%.FlushDescriptor.S" + - "toreFlushDescriptor\032Y\n\024StoreFlushDescrip" + - "tor\022\023\n\013family_name\030\001 \002(\014\022\026\n\016store_home_d" + - "ir\030\002 \002(\t\022\024\n\014flush_output\030\003 \003(\t\"A\n\013FlushA" + - "ction\022\017\n\013START_FLUSH\020\000\022\020\n\014COMMIT_FLUSH\020\001" + - "\022\017\n\013ABORT_FLUSH\020\002\"R\n\017StoreDescriptor\022\023\n\013" + - "family_name\030\001 \002(\014\022\026\n\016store_home_dir\030\002 \002(" + - "\t\022\022\n\nstore_file\030\003 \003(\t\"\215\001\n\022BulkLoadDescri", - "ptor\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\022\033\n\023" + - "encoded_region_name\030\002 \002(\014\022 \n\006stores\030\003 \003(" + - "\0132\020.StoreDescriptor\022\030\n\020bulkload_seq_num\030" + - "\004 \002(\003\"\212\002\n\025RegionEventDescriptor\0224\n\nevent" + - "_type\030\001 \002(\0162 .RegionEventDescriptor.Even" + - "tType\022\022\n\ntable_name\030\002 \002(\014\022\033\n\023encoded_reg" + - "ion_name\030\003 \002(\014\022\033\n\023log_sequence_number\030\004 " + - "\001(\004\022 \n\006stores\030\005 \003(\0132\020.StoreDescriptor\022\033\n" + - "\006server\030\006 \001(\0132\013.ServerName\".\n\tEventType\022" + - "\017\n\013REGION_OPEN\020\000\022\020\n\014REGION_CLOSE\020\001\"\014\n\nWA", - "LTrailer*F\n\tScopeType\022\033\n\027REPLICATION_SCO" + - "PE_LOCAL\020\000\022\034\n\030REPLICATION_SCOPE_GLOBAL\020\001" + - "B?\n*org.apache.hadoop.hbase.protobuf.gen" + - "eratedB\tWALProtosH\001\210\001\000\240\001\001" + "toreFlushDescriptor\022\023\n\013region_name\030\006 \001(\014" + + "\032Y\n\024StoreFlushDescriptor\022\023\n\013family_name\030" + + "\001 \002(\014\022\026\n\016store_home_dir\030\002 \002(\t\022\024\n\014flush_o" + + "utput\030\003 \003(\t\"S\n\013FlushAction\022\017\n\013START_FLUS" + + "H\020\000\022\020\n\014COMMIT_FLUSH\020\001\022\017\n\013ABORT_FLUSH\020\002\022\020" + + "\n\014CANNOT_FLUSH\020\003\"R\n\017StoreDescriptor\022\023\n\013f" + + "amily_name\030\001 \002(\014\022\026\n\016store_home_dir\030\002 \002(\t", + "\022\022\n\nstore_file\030\003 \003(\t\"\215\001\n\022BulkLoadDescrip" + + "tor\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\022\033\n\023e" + + "ncoded_region_name\030\002 \002(\014\022 \n\006stores\030\003 \003(\013" + + "2\020.StoreDescriptor\022\030\n\020bulkload_seq_num\030\004" + + " \002(\003\"\237\002\n\025RegionEventDescriptor\0224\n\nevent_" + + "type\030\001 \002(\0162 .RegionEventDescriptor.Event" + + "Type\022\022\n\ntable_name\030\002 \002(\014\022\033\n\023encoded_regi" + + "on_name\030\003 
\002(\014\022\033\n\023log_sequence_number\030\004 \001" + + "(\004\022 \n\006stores\030\005 \003(\0132\020.StoreDescriptor\022\033\n\006" + + "server\030\006 \001(\0132\013.ServerName\022\023\n\013region_name", + "\030\007 \001(\014\".\n\tEventType\022\017\n\013REGION_OPEN\020\000\022\020\n\014" + + "REGION_CLOSE\020\001\"\014\n\nWALTrailer*F\n\tScopeTyp" + + "e\022\033\n\027REPLICATION_SCOPE_LOCAL\020\000\022\034\n\030REPLIC" + + "ATION_SCOPE_GLOBAL\020\001B?\n*org.apache.hadoo" + + "p.hbase.protobuf.generatedB\tWALProtosH\001\210" + + "\001\000\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -11659,7 +11928,7 @@ public final class WALProtos { internal_static_FlushDescriptor_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_FlushDescriptor_descriptor, - new java.lang.String[] { "Action", "TableName", "EncodedRegionName", "FlushSequenceNumber", "StoreFlushes", }); + new java.lang.String[] { "Action", "TableName", "EncodedRegionName", "FlushSequenceNumber", "StoreFlushes", "RegionName", }); internal_static_FlushDescriptor_StoreFlushDescriptor_descriptor = internal_static_FlushDescriptor_descriptor.getNestedTypes().get(0); internal_static_FlushDescriptor_StoreFlushDescriptor_fieldAccessorTable = new @@ -11683,7 +11952,7 @@ public final class WALProtos { internal_static_RegionEventDescriptor_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionEventDescriptor_descriptor, - new java.lang.String[] { "EventType", "TableName", "EncodedRegionName", "LogSequenceNumber", "Stores", "Server", }); + new java.lang.String[] { "EventType", "TableName", "EncodedRegionName", "LogSequenceNumber", "Stores", "Server", "RegionName", }); internal_static_WALTrailer_descriptor = getDescriptor().getMessageTypes().get(8); internal_static_WALTrailer_fieldAccessorTable = new diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java index 5a1fbf13bed..f15e980cd2d 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java @@ -8196,1393 +8196,6 @@ public final class ZooKeeperProtos { // @@protoc_insertion_point(class_scope:TableLock) } - public interface StoreSequenceIdOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required bytes family_name = 1; - /** - * required bytes family_name = 1; - */ - boolean hasFamilyName(); - /** - * required bytes family_name = 1; - */ - com.google.protobuf.ByteString getFamilyName(); - - // required uint64 sequence_id = 2; - /** - * required uint64 sequence_id = 2; - */ - boolean hasSequenceId(); - /** - * required uint64 sequence_id = 2; - */ - long getSequenceId(); - } - /** - * Protobuf type {@code StoreSequenceId} - * - *
      -   **
      -   * sequence Id of a store
      -   * 
      - */ - public static final class StoreSequenceId extends - com.google.protobuf.GeneratedMessage - implements StoreSequenceIdOrBuilder { - // Use StoreSequenceId.newBuilder() to construct. - private StoreSequenceId(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private StoreSequenceId(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final StoreSequenceId defaultInstance; - public static StoreSequenceId getDefaultInstance() { - return defaultInstance; - } - - public StoreSequenceId getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private StoreSequenceId( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 10: { - bitField0_ |= 0x00000001; - familyName_ = input.readBytes(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - sequenceId_ = input.readUInt64(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public StoreSequenceId parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new StoreSequenceId(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required bytes family_name = 1; - public static final int FAMILY_NAME_FIELD_NUMBER = 1; - private com.google.protobuf.ByteString familyName_; - /** - * required bytes family_name = 1; - */ - public boolean hasFamilyName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required bytes family_name = 1; - */ - public 
com.google.protobuf.ByteString getFamilyName() { - return familyName_; - } - - // required uint64 sequence_id = 2; - public static final int SEQUENCE_ID_FIELD_NUMBER = 2; - private long sequenceId_; - /** - * required uint64 sequence_id = 2; - */ - public boolean hasSequenceId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required uint64 sequence_id = 2; - */ - public long getSequenceId() { - return sequenceId_; - } - - private void initFields() { - familyName_ = com.google.protobuf.ByteString.EMPTY; - sequenceId_ = 0L; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasFamilyName()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasSequenceId()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBytes(1, familyName_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, sequenceId_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(1, familyName_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, sequenceId_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId) obj; - - boolean result = true; - result = result && (hasFamilyName() == other.hasFamilyName()); - if (hasFamilyName()) { - result = result && getFamilyName() - .equals(other.getFamilyName()); - } - result = result && (hasSequenceId() == other.hasSequenceId()); - if (hasSequenceId()) { - result = result && (getSequenceId() - == other.getSequenceId()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasFamilyName()) { - hash = (37 * hash) + FAMILY_NAME_FIELD_NUMBER; - hash = (53 * hash) + getFamilyName().hashCode(); - } - if (hasSequenceId()) { - hash = (37 * hash) + SEQUENCE_ID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getSequenceId()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code StoreSequenceId} - * - *
      -     **
      -     * sequence Id of a store
      -     * 
      - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - familyName_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - sequenceId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.familyName_ = familyName_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.sequenceId_ = sequenceId_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId other) { - if (other == 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDefaultInstance()) return this; - if (other.hasFamilyName()) { - setFamilyName(other.getFamilyName()); - } - if (other.hasSequenceId()) { - setSequenceId(other.getSequenceId()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasFamilyName()) { - - return false; - } - if (!hasSequenceId()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required bytes family_name = 1; - private com.google.protobuf.ByteString familyName_ = com.google.protobuf.ByteString.EMPTY; - /** - * required bytes family_name = 1; - */ - public boolean hasFamilyName() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required bytes family_name = 1; - */ - public com.google.protobuf.ByteString getFamilyName() { - return familyName_; - } - /** - * required bytes family_name = 1; - */ - public Builder setFamilyName(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - familyName_ = value; - onChanged(); - return this; - } - /** - * required bytes family_name = 1; - */ - public Builder clearFamilyName() { - bitField0_ = (bitField0_ & ~0x00000001); - familyName_ = getDefaultInstance().getFamilyName(); - onChanged(); - return this; - } - - // required uint64 sequence_id = 2; - private long sequenceId_ ; - /** - * required uint64 sequence_id = 2; - */ - public boolean hasSequenceId() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * required uint64 sequence_id = 2; - */ - public long getSequenceId() { - return sequenceId_; - } - /** - * required uint64 sequence_id = 2; - */ - public Builder setSequenceId(long value) { - bitField0_ |= 0x00000002; - sequenceId_ = value; - onChanged(); - return this; - } - /** - * required uint64 sequence_id = 2; - */ - public Builder clearSequenceId() { - bitField0_ = (bitField0_ & ~0x00000002); - sequenceId_ = 0L; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:StoreSequenceId) - } - - static { - defaultInstance = new StoreSequenceId(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:StoreSequenceId) - } - - public interface RegionStoreSequenceIdsOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required uint64 last_flushed_sequence_id = 1; - /** - * required uint64 last_flushed_sequence_id = 1; - */ - boolean hasLastFlushedSequenceId(); - /** - * required uint64 last_flushed_sequence_id = 1; - */ - long getLastFlushedSequenceId(); - - // repeated .StoreSequenceId store_sequence_id = 2; - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - java.util.List - getStoreSequenceIdList(); - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId getStoreSequenceId(int index); - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - int getStoreSequenceIdCount(); - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - java.util.List - getStoreSequenceIdOrBuilderList(); - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder( - int index); - } - /** - * Protobuf type {@code RegionStoreSequenceIds} - * - *
      -   **
      -   * contains a sequence id of a region which should be the minimum of its store sequence ids and 
      -   * list sequence ids of the region's stores
      -   * 
      - */ - public static final class RegionStoreSequenceIds extends - com.google.protobuf.GeneratedMessage - implements RegionStoreSequenceIdsOrBuilder { - // Use RegionStoreSequenceIds.newBuilder() to construct. - private RegionStoreSequenceIds(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private RegionStoreSequenceIds(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final RegionStoreSequenceIds defaultInstance; - public static RegionStoreSequenceIds getDefaultInstance() { - return defaultInstance; - } - - public RegionStoreSequenceIds getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private RegionStoreSequenceIds( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - lastFlushedSequenceId_ = input.readUInt64(); - break; - } - case 18: { - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - storeSequenceId_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000002; - } - storeSequenceId_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.PARSER, extensionRegistry)); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - storeSequenceId_ = java.util.Collections.unmodifiableList(storeSequenceId_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionStoreSequenceIds_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionStoreSequenceIds_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public RegionStoreSequenceIds parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new RegionStoreSequenceIds(input, extensionRegistry); - } - }; - 
- @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // required uint64 last_flushed_sequence_id = 1; - public static final int LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER = 1; - private long lastFlushedSequenceId_; - /** - * required uint64 last_flushed_sequence_id = 1; - */ - public boolean hasLastFlushedSequenceId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required uint64 last_flushed_sequence_id = 1; - */ - public long getLastFlushedSequenceId() { - return lastFlushedSequenceId_; - } - - // repeated .StoreSequenceId store_sequence_id = 2; - public static final int STORE_SEQUENCE_ID_FIELD_NUMBER = 2; - private java.util.List storeSequenceId_; - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public java.util.List getStoreSequenceIdList() { - return storeSequenceId_; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public java.util.List - getStoreSequenceIdOrBuilderList() { - return storeSequenceId_; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public int getStoreSequenceIdCount() { - return storeSequenceId_.size(); - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId getStoreSequenceId(int index) { - return storeSequenceId_.get(index); - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder( - int index) { - return storeSequenceId_.get(index); - } - - private void initFields() { - lastFlushedSequenceId_ = 0L; - storeSequenceId_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasLastFlushedSequenceId()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getStoreSequenceIdCount(); i++) { - if (!getStoreSequenceId(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, lastFlushedSequenceId_); - } - for (int i = 0; i < storeSequenceId_.size(); i++) { - output.writeMessage(2, storeSequenceId_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, lastFlushedSequenceId_); - } - for (int i = 0; i < storeSequenceId_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, storeSequenceId_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds) obj; - - boolean result = true; - result = result && (hasLastFlushedSequenceId() == other.hasLastFlushedSequenceId()); - if (hasLastFlushedSequenceId()) { - result = result && (getLastFlushedSequenceId() - == other.getLastFlushedSequenceId()); - } - result = result && getStoreSequenceIdList() - .equals(other.getStoreSequenceIdList()); - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasLastFlushedSequenceId()) { - hash = (37 * hash) + LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER; - hash = (53 * hash) + hashLong(getLastFlushedSequenceId()); - } - if (getStoreSequenceIdCount() > 0) { - hash = (37 * hash) + STORE_SEQUENCE_ID_FIELD_NUMBER; - hash = (53 * hash) + getStoreSequenceIdList().hashCode(); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code RegionStoreSequenceIds} - * - *
      -     **
      -     * contains a sequence id of a region which should be the minimum of its store sequence ids and 
      -     * list sequence ids of the region's stores
      -     * 
      - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIdsOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionStoreSequenceIds_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionStoreSequenceIds_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getStoreSequenceIdFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - lastFlushedSequenceId_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - if (storeSequenceIdBuilder_ == null) { - storeSequenceId_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - } else { - storeSequenceIdBuilder_.clear(); - } - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionStoreSequenceIds_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.lastFlushedSequenceId_ = lastFlushedSequenceId_; - if (storeSequenceIdBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002)) { - storeSequenceId_ = java.util.Collections.unmodifiableList(storeSequenceId_); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.storeSequenceId_ = storeSequenceId_; - } else { - result.storeSequenceId_ = storeSequenceIdBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder 
mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.getDefaultInstance()) return this; - if (other.hasLastFlushedSequenceId()) { - setLastFlushedSequenceId(other.getLastFlushedSequenceId()); - } - if (storeSequenceIdBuilder_ == null) { - if (!other.storeSequenceId_.isEmpty()) { - if (storeSequenceId_.isEmpty()) { - storeSequenceId_ = other.storeSequenceId_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureStoreSequenceIdIsMutable(); - storeSequenceId_.addAll(other.storeSequenceId_); - } - onChanged(); - } - } else { - if (!other.storeSequenceId_.isEmpty()) { - if (storeSequenceIdBuilder_.isEmpty()) { - storeSequenceIdBuilder_.dispose(); - storeSequenceIdBuilder_ = null; - storeSequenceId_ = other.storeSequenceId_; - bitField0_ = (bitField0_ & ~0x00000002); - storeSequenceIdBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getStoreSequenceIdFieldBuilder() : null; - } else { - storeSequenceIdBuilder_.addAllMessages(other.storeSequenceId_); - } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasLastFlushedSequenceId()) { - - return false; - } - for (int i = 0; i < getStoreSequenceIdCount(); i++) { - if (!getStoreSequenceId(i).isInitialized()) { - - return false; - } - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // required uint64 last_flushed_sequence_id = 1; - private long lastFlushedSequenceId_ ; - /** - * required uint64 last_flushed_sequence_id = 1; - */ - public boolean hasLastFlushedSequenceId() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required uint64 last_flushed_sequence_id = 1; - */ - public long getLastFlushedSequenceId() { - return lastFlushedSequenceId_; - } - /** - * required uint64 last_flushed_sequence_id = 1; - */ - public Builder setLastFlushedSequenceId(long value) { - bitField0_ |= 0x00000001; - lastFlushedSequenceId_ = value; - onChanged(); - return this; - } - /** - * required uint64 last_flushed_sequence_id = 1; - */ - public Builder clearLastFlushedSequenceId() { - bitField0_ = (bitField0_ & ~0x00000001); - lastFlushedSequenceId_ = 0L; - onChanged(); - return this; - } - - // repeated .StoreSequenceId store_sequence_id = 2; - private java.util.List storeSequenceId_ = - java.util.Collections.emptyList(); - private void ensureStoreSequenceIdIsMutable() { - if (!((bitField0_ & 0x00000002) == 
0x00000002)) { - storeSequenceId_ = new java.util.ArrayList(storeSequenceId_); - bitField0_ |= 0x00000002; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder> storeSequenceIdBuilder_; - - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public java.util.List getStoreSequenceIdList() { - if (storeSequenceIdBuilder_ == null) { - return java.util.Collections.unmodifiableList(storeSequenceId_); - } else { - return storeSequenceIdBuilder_.getMessageList(); - } - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public int getStoreSequenceIdCount() { - if (storeSequenceIdBuilder_ == null) { - return storeSequenceId_.size(); - } else { - return storeSequenceIdBuilder_.getCount(); - } - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId getStoreSequenceId(int index) { - if (storeSequenceIdBuilder_ == null) { - return storeSequenceId_.get(index); - } else { - return storeSequenceIdBuilder_.getMessage(index); - } - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder setStoreSequenceId( - int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId value) { - if (storeSequenceIdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureStoreSequenceIdIsMutable(); - storeSequenceId_.set(index, value); - onChanged(); - } else { - storeSequenceIdBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder setStoreSequenceId( - int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder builderForValue) { - if (storeSequenceIdBuilder_ == null) { - ensureStoreSequenceIdIsMutable(); - storeSequenceId_.set(index, builderForValue.build()); - onChanged(); - } else { - storeSequenceIdBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder addStoreSequenceId(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId value) { - if (storeSequenceIdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureStoreSequenceIdIsMutable(); - storeSequenceId_.add(value); - onChanged(); - } else { - storeSequenceIdBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder addStoreSequenceId( - int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId value) { - if (storeSequenceIdBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureStoreSequenceIdIsMutable(); - storeSequenceId_.add(index, value); - onChanged(); - } else { - storeSequenceIdBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder addStoreSequenceId( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder builderForValue) { - if (storeSequenceIdBuilder_ == null) { - ensureStoreSequenceIdIsMutable(); - storeSequenceId_.add(builderForValue.build()); - onChanged(); - } else { - 
storeSequenceIdBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder addStoreSequenceId( - int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder builderForValue) { - if (storeSequenceIdBuilder_ == null) { - ensureStoreSequenceIdIsMutable(); - storeSequenceId_.add(index, builderForValue.build()); - onChanged(); - } else { - storeSequenceIdBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder addAllStoreSequenceId( - java.lang.Iterable values) { - if (storeSequenceIdBuilder_ == null) { - ensureStoreSequenceIdIsMutable(); - super.addAll(values, storeSequenceId_); - onChanged(); - } else { - storeSequenceIdBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder clearStoreSequenceId() { - if (storeSequenceIdBuilder_ == null) { - storeSequenceId_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - } else { - storeSequenceIdBuilder_.clear(); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public Builder removeStoreSequenceId(int index) { - if (storeSequenceIdBuilder_ == null) { - ensureStoreSequenceIdIsMutable(); - storeSequenceId_.remove(index); - onChanged(); - } else { - storeSequenceIdBuilder_.remove(index); - } - return this; - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder getStoreSequenceIdBuilder( - int index) { - return getStoreSequenceIdFieldBuilder().getBuilder(index); - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder( - int index) { - if (storeSequenceIdBuilder_ == null) { - return storeSequenceId_.get(index); } else { - return storeSequenceIdBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public java.util.List - getStoreSequenceIdOrBuilderList() { - if (storeSequenceIdBuilder_ != null) { - return storeSequenceIdBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(storeSequenceId_); - } - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder addStoreSequenceIdBuilder() { - return getStoreSequenceIdFieldBuilder().addBuilder( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDefaultInstance()); - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder addStoreSequenceIdBuilder( - int index) { - return getStoreSequenceIdFieldBuilder().addBuilder( - index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDefaultInstance()); - } - /** - * repeated .StoreSequenceId store_sequence_id = 2; - */ - public java.util.List - getStoreSequenceIdBuilderList() { - return getStoreSequenceIdFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId, 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder> - getStoreSequenceIdFieldBuilder() { - if (storeSequenceIdBuilder_ == null) { - storeSequenceIdBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder>( - storeSequenceId_, - ((bitField0_ & 0x00000002) == 0x00000002), - getParentForChildren(), - isClean()); - storeSequenceId_ = null; - } - return storeSequenceIdBuilder_; - } - - // @@protoc_insertion_point(builder_scope:RegionStoreSequenceIds) - } - - static { - defaultInstance = new RegionStoreSequenceIds(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:RegionStoreSequenceIds) - } - private static com.google.protobuf.Descriptors.Descriptor internal_static_MetaRegionServer_descriptor; private static @@ -9633,16 +8246,6 @@ public final class ZooKeeperProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_TableLock_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_StoreSequenceId_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_StoreSequenceId_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_RegionStoreSequenceIds_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_RegionStoreSequenceIds_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -9681,12 +8284,8 @@ public final class ZooKeeperProtos { "2\n.TableName\022\037\n\nlock_owner\030\002 \001(\0132\013.Serve" + "rName\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_shared\030\004 " + "\001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_time\030\006 \001(" + - "\003\";\n\017StoreSequenceId\022\023\n\013family_name\030\001 \002(", - "\014\022\023\n\013sequence_id\030\002 \002(\004\"g\n\026RegionStoreSeq" + - "uenceIds\022 \n\030last_flushed_sequence_id\030\001 \002" + - "(\004\022+\n\021store_sequence_id\030\002 \003(\0132\020.StoreSeq" + - "uenceIdBE\n*org.apache.hadoop.hbase.proto" + - "buf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001" + "\003BE\n*org.apache.hadoop.hbase.protobuf.ge", + "neratedB\017ZooKeeperProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -9753,18 +8352,6 @@ public final class ZooKeeperProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TableLock_descriptor, new java.lang.String[] { "TableName", "LockOwner", "ThreadId", "IsShared", "Purpose", "CreateTime", }); - internal_static_StoreSequenceId_descriptor = - getDescriptor().getMessageTypes().get(10); - internal_static_StoreSequenceId_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_StoreSequenceId_descriptor, - new java.lang.String[] { "FamilyName", "SequenceId", }); - internal_static_RegionStoreSequenceIds_descriptor = - getDescriptor().getMessageTypes().get(11); - 
internal_static_RegionStoreSequenceIds_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_RegionStoreSequenceIds_descriptor, - new java.lang.String[] { "LastFlushedSequenceId", "StoreSequenceId", }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/Admin.proto b/hbase-protocol/src/main/protobuf/Admin.proto index fcc4e1d0248..1df0958ac76 100644 --- a/hbase-protocol/src/main/protobuf/Admin.proto +++ b/hbase-protocol/src/main/protobuf/Admin.proto @@ -90,6 +90,14 @@ message OpenRegionResponse { } } +message WarmupRegionRequest { + + required RegionInfo regionInfo = 1; +} + +message WarmupRegionResponse { +} + /** * Closes the specified region and will use or not use ZK during the close * according to the specified flag. @@ -115,11 +123,13 @@ message CloseRegionResponse { message FlushRegionRequest { required RegionSpecifier region = 1; optional uint64 if_older_than_ts = 2; + optional bool write_flush_wal_marker = 3; // whether to write a marker to WAL even if not flushed } message FlushRegionResponse { required uint64 last_flush_time = 1; optional bool flushed = 2; + optional bool wrote_flush_wal_marker = 3; } /** @@ -251,6 +261,9 @@ service AdminService { rpc OpenRegion(OpenRegionRequest) returns(OpenRegionResponse); + rpc WarmupRegion(WarmupRegionRequest) + returns(WarmupRegionResponse); + rpc CloseRegion(CloseRegionRequest) returns(CloseRegionResponse); diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto index 606ca8df131..e0c370b3c4f 100644 --- a/hbase-protocol/src/main/protobuf/Client.proto +++ b/hbase-protocol/src/main/protobuf/Client.proto @@ -105,6 +105,12 @@ message Result { // Whether or not the results are coming from possibly stale data optional bool stale = 4 [default = false]; + + // Whether or not the entire result could be returned. Results will be split when + // the RPC chunk size limit is reached. Partial results contain only a subset of the + // cells for a row and must be combined with a result containing the remaining cells + // to form a complete result + optional bool partial = 5 [default = false]; } /** @@ -268,6 +274,7 @@ message ScanRequest { optional uint32 number_of_rows = 4; optional bool close_scanner = 5; optional uint64 next_call_seq = 6; + optional bool client_handles_partials = 7; } /** @@ -283,6 +290,7 @@ message ScanResponse { // has 3, 3, 3 in it, then we know that on the client, we are to make // three Results each of three Cells each. repeated uint32 cells_per_result = 1; + optional uint64 scanner_id = 2; optional bool more_results = 3; optional uint32 ttl = 4; @@ -291,6 +299,20 @@ message ScanResponse { // be inside the pb'd Result) repeated Result results = 5; optional bool stale = 6; + + // This field is filled in if we are doing cellblocks. In the event that a row + // could not fit all of its cells into a single RPC chunk, the results will be + // returned as partials, and reconstructed into a complete result on the client + // side. This field is a list of flags indicating whether or not the result + // that the cells belong to is a partial result. For example, if this field + // has false, false, true in it, then we know that on the client side, we need to + // make another RPC request since the last result was only a partial. + repeated bool partial_flag_per_result = 7; + + // A server may choose to limit the number of results returned to the client for + // reasons such as the size in bytes or quantity of results accumulated. 
This field + will be true when more results exist in the current region. + optional bool more_results_in_region = 8; } /** diff --git a/hbase-protocol/src/main/protobuf/ClusterStatus.proto b/hbase-protocol/src/main/protobuf/ClusterStatus.proto index 2b2d9eb8ac0..305e08a8111 100644 --- a/hbase-protocol/src/main/protobuf/ClusterStatus.proto +++ b/hbase-protocol/src/main/protobuf/ClusterStatus.proto @@ -59,6 +59,23 @@ message RegionInTransition { required RegionState region_state = 2; } +/** + * sequence id of a store + */ +message StoreSequenceId { + required bytes family_name = 1; + required uint64 sequence_id = 2; +} + +/** + * contains the sequence id of a region, which should be the minimum of its store sequence ids, and + * a list of sequence ids of the region's stores + */ +message RegionStoreSequenceIds { + required uint64 last_flushed_sequence_id = 1; + repeated StoreSequenceId store_sequence_id = 2; +} + message RegionLoad { /** the region specifier */ required RegionSpecifier region_specifier = 1; @@ -115,10 +132,26 @@ message RegionLoad { optional float data_locality = 16; optional uint64 last_major_compaction_ts = 17 [default = 0]; + + /** the most recent sequence id of each store from cache flush */ + repeated StoreSequenceId store_complete_sequence_id = 18; } /* Server-level protobufs */ +message ReplicationLoadSink { + required uint64 ageOfLastAppliedOp = 1; + required uint64 timeStampsOfLastAppliedOp = 2; +} + +message ReplicationLoadSource { + required string peerID = 1; + required uint64 ageOfLastShippedOp = 2; + required uint32 sizeOfLogQueue = 3; + required uint64 timeStampOfLastShippedOp = 4; + required uint64 replicationLag = 5; +} + message ServerLoad { /** Number of requests since last report. */ optional uint32 number_of_requests = 1; @@ -160,6 +193,16 @@ message ServerLoad { * The port number that this region server is hosing an info server on. */ optional uint32 info_server_port = 9; + + /** + * The replicationLoadSource for the replication Source status of this region server. + */ + repeated ReplicationLoadSource replLoadSource = 10; + + /** + * The replicationLoadSink for the replication Sink status of this region server. 
+ */ + optional ReplicationLoadSink replLoadSink = 11; } message LiveServerInfo { diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index e7a3a999536..d5f4275bd1b 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -28,6 +28,7 @@ option optimize_for = SPEED; import "HBase.proto"; import "Client.proto"; import "ClusterStatus.proto"; +import "ErrorHandling.proto"; import "Quota.proto"; /* Column-level protobufs */ @@ -108,6 +109,7 @@ message CreateTableRequest { } message CreateTableResponse { + optional uint64 proc_id = 1; } message DeleteTableRequest { @@ -115,6 +117,7 @@ message DeleteTableRequest { } message DeleteTableResponse { + optional uint64 proc_id = 1; } message TruncateTableRequest { @@ -130,6 +133,7 @@ message EnableTableRequest { } message EnableTableResponse { + optional uint64 proc_id = 1; } message DisableTableRequest { @@ -137,6 +141,7 @@ message DisableTableRequest { } message DisableTableResponse { + optional uint64 proc_id = 1; } message ModifyTableRequest { @@ -232,6 +237,13 @@ message SetBalancerRunningResponse { optional bool prev_balance_value = 1; } +message IsBalancerEnabledRequest { +} + +message IsBalancerEnabledResponse { + required bool enabled = 1; +} + message RunCatalogScanRequest { } @@ -373,6 +385,24 @@ message IsProcedureDoneResponse { optional ProcedureDescription snapshot = 2; } +message GetProcedureResultRequest { + required uint64 proc_id = 1; +} + +message GetProcedureResultResponse { + enum State { + NOT_FOUND = 0; + RUNNING = 1; + FINISHED = 2; + } + + required State state = 1; + optional uint64 start_time = 2; + optional uint64 last_update = 3; + optional bytes result = 4; + optional ForeignExceptionMessage exception = 5; +} + message SetQuotaRequest { optional string user_name = 1; optional string user_group = 2; @@ -508,6 +538,12 @@ service MasterService { rpc SetBalancerRunning(SetBalancerRunningRequest) returns(SetBalancerRunningResponse); + /** + * Query whether the Region Balancer is running. + */ + rpc IsBalancerEnabled(IsBalancerEnabledRequest) + returns(IsBalancerEnabledResponse); + /** Get a run of the catalog janitor */ rpc RunCatalogScan(RunCatalogScanRequest) returns(RunCatalogScanResponse); @@ -621,4 +657,7 @@ service MasterService { /** Returns the timestamp of the last major compaction */ rpc getLastMajorCompactionTimestampForRegion(MajorCompactionTimestampForRegionRequest) returns(MajorCompactionTimestampResponse); + + rpc getProcedureResult(GetProcedureResultRequest) + returns(GetProcedureResultResponse); } diff --git a/hbase-protocol/src/main/protobuf/MasterProcedure.proto b/hbase-protocol/src/main/protobuf/MasterProcedure.proto new file mode 100644 index 00000000000..e1c6880838f --- /dev/null +++ b/hbase-protocol/src/main/protobuf/MasterProcedure.proto @@ -0,0 +1,185 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "MasterProcedureProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "HBase.proto"; +import "RPC.proto"; + +// ============================================================================ +// WARNING - Compatibility rules +// ============================================================================ +// This .proto contains the data serialized by the master procedures. +// Each procedure stores some state so that it knows which steps were executed +// and what parameters or data the previous steps produced. +// New code should be able to handle the old format, or at least fail cleanly +// and trigger a rollback/cleanup. +// +// Procedures that inherit from a StateMachineProcedure have a 'State' enum: +// - Do not change the numbers of the 'State' enum values; +// doing so will cause the wrong 'step' to be executed on pending +// procedures when they are replayed. +// - Do not remove items from the enum; new code must be able to handle +// all the previous 'steps', since there may be pending procedures ready to be +// recovered and replayed. Alternatively, make sure that an unknown state +// results in a failure that rolls back the already executed steps.
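As an aside on how these procedure protos are meant to be consumed: a client that gets back the proc_id now carried by CreateTableResponse (see the Master.proto changes above) can poll the new getProcedureResult RPC until the procedure leaves the RUNNING state. A minimal sketch built only on the generated message types; the MasterProtos outer class name and the MasterRpc stand-in for the real MasterService stub are assumptions, not part of this patch:

  // Sketch: poll getProcedureResult for a procedure id returned by a DDL call.
  import java.io.IOException;
  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest;
  import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse;

  public class ProcedureResultPoller {
    /** Stand-in for however the caller reaches the MasterService endpoint. */
    interface MasterRpc {
      GetProcedureResultResponse getProcedureResult(GetProcedureResultRequest req) throws IOException;
    }

    static GetProcedureResultResponse waitForProcedure(MasterRpc master, long procId)
        throws IOException, InterruptedException {
      GetProcedureResultRequest req =
          GetProcedureResultRequest.newBuilder().setProcId(procId).build();
      while (true) {
        GetProcedureResultResponse resp = master.getProcedureResult(req);
        // NOT_FOUND and FINISHED are both terminal from the caller's point of view.
        if (resp.getState() != GetProcedureResultResponse.State.RUNNING) {
          return resp;  // caller inspects hasException()/getResult()
        }
        Thread.sleep(1000);
      }
    }
  }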
+// ============================================================================ + +enum CreateTableState { + CREATE_TABLE_PRE_OPERATION = 1; + CREATE_TABLE_WRITE_FS_LAYOUT = 2; + CREATE_TABLE_ADD_TO_META = 3; + CREATE_TABLE_ASSIGN_REGIONS = 4; + CREATE_TABLE_UPDATE_DESC_CACHE = 5; + CREATE_TABLE_POST_OPERATION = 6; +} + +message CreateTableStateData { + required UserInformation user_info = 1; + required TableSchema table_schema = 2; + repeated RegionInfo region_info = 3; +} + +enum ModifyTableState { + MODIFY_TABLE_PREPARE = 1; + MODIFY_TABLE_PRE_OPERATION = 2; + MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR = 3; + MODIFY_TABLE_REMOVE_REPLICA_COLUMN = 4; + MODIFY_TABLE_DELETE_FS_LAYOUT = 5; + MODIFY_TABLE_POST_OPERATION = 6; + MODIFY_TABLE_REOPEN_ALL_REGIONS = 7; +} + +message ModifyTableStateData { + required UserInformation user_info = 1; + optional TableSchema unmodified_table_schema = 2; + required TableSchema modified_table_schema = 3; + required bool delete_column_family_in_modify = 4; +} + +enum TruncateTableState { + TRUNCATE_TABLE_PRE_OPERATION = 1; + TRUNCATE_TABLE_REMOVE_FROM_META = 2; + TRUNCATE_TABLE_CLEAR_FS_LAYOUT = 3; + TRUNCATE_TABLE_CREATE_FS_LAYOUT = 4; + TRUNCATE_TABLE_ADD_TO_META = 5; + TRUNCATE_TABLE_ASSIGN_REGIONS = 6; + TRUNCATE_TABLE_POST_OPERATION = 7; +} + +message TruncateTableStateData { + required UserInformation user_info = 1; + required bool preserve_splits = 2; + optional TableName table_name = 3; + optional TableSchema table_schema = 4; + repeated RegionInfo region_info = 5; +} + +enum DeleteTableState { + DELETE_TABLE_PRE_OPERATION = 1; + DELETE_TABLE_REMOVE_FROM_META = 2; + DELETE_TABLE_CLEAR_FS_LAYOUT = 3; + DELETE_TABLE_UPDATE_DESC_CACHE = 4; + DELETE_TABLE_UNASSIGN_REGIONS = 5; + DELETE_TABLE_POST_OPERATION = 6; +} + +message DeleteTableStateData { + required UserInformation user_info = 1; + required TableName table_name = 2; + repeated RegionInfo region_info = 3; +} + +enum AddColumnFamilyState { + ADD_COLUMN_FAMILY_PREPARE = 1; + ADD_COLUMN_FAMILY_PRE_OPERATION = 2; + ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; + ADD_COLUMN_FAMILY_POST_OPERATION = 4; + ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5; +} + +message AddColumnFamilyStateData { + required UserInformation user_info = 1; + required TableName table_name = 2; + required ColumnFamilySchema columnfamily_schema = 3; + optional TableSchema unmodified_table_schema = 4; +} + +enum ModifyColumnFamilyState { + MODIFY_COLUMN_FAMILY_PREPARE = 1; + MODIFY_COLUMN_FAMILY_PRE_OPERATION = 2; + MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; + MODIFY_COLUMN_FAMILY_POST_OPERATION = 4; + MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 5; +} + +message ModifyColumnFamilyStateData { + required UserInformation user_info = 1; + required TableName table_name = 2; + required ColumnFamilySchema columnfamily_schema = 3; + optional TableSchema unmodified_table_schema = 4; +} + +enum DeleteColumnFamilyState { + DELETE_COLUMN_FAMILY_PREPARE = 1; + DELETE_COLUMN_FAMILY_PRE_OPERATION = 2; + DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR = 3; + DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT = 4; + DELETE_COLUMN_FAMILY_POST_OPERATION = 5; + DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS = 6; +} + +message DeleteColumnFamilyStateData { + required UserInformation user_info = 1; + required TableName table_name = 2; + required bytes columnfamily_name = 3; + optional TableSchema unmodified_table_schema = 4; +} + +enum EnableTableState { + ENABLE_TABLE_PREPARE = 1; + ENABLE_TABLE_PRE_OPERATION = 2; + ENABLE_TABLE_SET_ENABLING_TABLE_STATE = 3; + 
ENABLE_TABLE_MARK_REGIONS_ONLINE = 4; + ENABLE_TABLE_SET_ENABLED_TABLE_STATE = 5; + ENABLE_TABLE_POST_OPERATION = 6; +} + +message EnableTableStateData { + required UserInformation user_info = 1; + required TableName table_name = 2; + required bool skip_table_state_check = 3; +} + +enum DisableTableState { + DISABLE_TABLE_PREPARE = 1; + DISABLE_TABLE_PRE_OPERATION = 2; + DISABLE_TABLE_SET_DISABLING_TABLE_STATE = 3; + DISABLE_TABLE_MARK_REGIONS_OFFLINE = 4; + DISABLE_TABLE_SET_DISABLED_TABLE_STATE = 5; + DISABLE_TABLE_POST_OPERATION = 6; +} + +message DisableTableStateData { + required UserInformation user_info = 1; + required TableName table_name = 2; + required bool skip_table_state_check = 3; +} diff --git a/hbase-protocol/src/main/protobuf/Procedure.proto b/hbase-protocol/src/main/protobuf/Procedure.proto new file mode 100644 index 00000000000..232c2903d4d --- /dev/null +++ b/hbase-protocol/src/main/protobuf/Procedure.proto @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "ProcedureProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "ErrorHandling.proto"; + +enum ProcedureState { + INITIALIZING = 1; // Procedure in construction, not yet added to the executor + RUNNABLE = 2; // Procedure added to the executor, and ready to be executed + WAITING = 3; // The procedure is waiting on children to be completed + WAITING_TIMEOUT = 4; // The procedure is waiting a timout or an external event + ROLLEDBACK = 5; // The procedure failed and was rolledback + FINISHED = 6; // The procedure execution is completed. may need a rollback if failed. +} + +/** + * Procedure metadata, serialized by the ProcedureStore to be able to recover the old state. 
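Purely as a sketch of how this metadata round-trips: the Procedure message defined below can be written and re-read with the standard protobuf-java delimited helpers, which is essentially what a ProcedureStore has to do on recovery. The dummy procedure class name is hypothetical; the ProcedureProtos outer class and ProcedureState enum come from this file:

  // Sketch: serialize and recover one Procedure metadata record.
  import java.io.ByteArrayInputStream;
  import java.io.ByteArrayOutputStream;
  import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos;

  public class ProcedureRoundTrip {
    public static void main(String[] args) throws Exception {
      ProcedureProtos.Procedure proc = ProcedureProtos.Procedure.newBuilder()
          .setClassName("org.example.DummyProcedure")    // hypothetical procedure class
          .setProcId(1L)
          .setStartTime(System.currentTimeMillis())
          .setState(ProcedureProtos.ProcedureState.RUNNABLE)
          .setLastUpdate(System.currentTimeMillis())
          .build();                                      // all required fields are set above

      ByteArrayOutputStream out = new ByteArrayOutputStream();
      proc.writeDelimitedTo(out);                        // same framing a WAL-style store could use
      ProcedureProtos.Procedure copy = ProcedureProtos.Procedure
          .parseDelimitedFrom(new ByteArrayInputStream(out.toByteArray()));
      System.out.println(copy.getProcId() + " " + copy.getState());
    }
  }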
+ */ +message Procedure { + // internal "static" state + required string class_name = 1; // full classname to be able to instantiate the procedure + optional uint64 parent_id = 2; // parent if not a root-procedure otherwise not set + required uint64 proc_id = 3; + required uint64 start_time = 4; + optional string owner = 5; + + // internal "runtime" state + required ProcedureState state = 6; + repeated uint32 stack_id = 7; // stack indices in case the procedure was running + required uint64 last_update = 8; + optional uint32 timeout = 9; + + // user state/results + optional ForeignExceptionMessage exception = 10; + optional bytes result = 11; // opaque (user) result structure + optional bytes state_data = 12; // opaque (user) procedure internal-state +} + +/** + * SequentialProcedure data + */ +message SequentialProcedureData { + required bool executed = 1; +} + +/** + * StateMachineProcedure data + */ +message StateMachineProcedureData { + repeated uint32 state = 1; +} + +/** + * Procedure WAL header + */ +message ProcedureWALHeader { + required uint32 version = 1; + required uint32 type = 2; + required uint64 log_id = 3; + required uint64 min_proc_id = 4; +} + +/** + * Procedure WAL trailer + */ +message ProcedureWALTrailer { + required uint32 version = 1; + required uint64 tracker_pos = 2; +} + +message ProcedureStoreTracker { + message TrackerNode { + required uint64 start_id = 1; + repeated uint64 updated = 2; + repeated uint64 deleted = 3; + } + + repeated TrackerNode node = 1; +} + +message ProcedureWALEntry { + enum Type { + EOF = 1; + INIT = 2; + INSERT = 3; + UPDATE = 4; + DELETE = 5; + COMPACT = 6; + } + + required Type type = 1; + repeated Procedure procedure = 2; + optional uint64 proc_id = 3; +} diff --git a/hbase-protocol/src/main/protobuf/RPC.proto b/hbase-protocol/src/main/protobuf/RPC.proto index adef37395a9..a5d60d8197a 100644 --- a/hbase-protocol/src/main/protobuf/RPC.proto +++ b/hbase-protocol/src/main/protobuf/RPC.proto @@ -76,6 +76,16 @@ message UserInformation { optional string real_user = 2; } +// Rpc client version info proto. Included in ConnectionHeader on connection setup +message VersionInfo { + required string version = 1; + required string url = 2; + required string revision = 3; + required string user = 4; + required string date = 5; + required string src_checksum = 6; +} + // This is sent on connection setup after the connection preamble is sent. message ConnectionHeader { optional UserInformation user_info = 1; @@ -86,6 +96,7 @@ message ConnectionHeader { // Compressor we will use if cell block is compressed. Server will throw exception if not supported. // Class must implement hadoop's CompressionCodec Interface. Can't compress if no codec. optional string cell_block_compressor_class = 4; + optional VersionInfo version_info = 5; } // Optional Cell block Message. 
Included in client RequestHeader diff --git a/hbase-protocol/src/main/protobuf/RegionServerStatus.proto b/hbase-protocol/src/main/protobuf/RegionServerStatus.proto index 75e5ae4903b..33de5010a96 100644 --- a/hbase-protocol/src/main/protobuf/RegionServerStatus.proto +++ b/hbase-protocol/src/main/protobuf/RegionServerStatus.proto @@ -36,6 +36,9 @@ message RegionServerStartupRequest { /** Current time of the region server in ms */ required uint64 server_current_time = 3; + + /** hostname for region server, optional */ + optional string use_this_hostname_instead = 4; } message RegionServerStartupResponse { @@ -74,8 +77,11 @@ message GetLastFlushedSequenceIdRequest { } message GetLastFlushedSequenceIdResponse { - /* the last WAL sequence id flushed from MemStore to HFile for the region */ + /** the last WAL sequence id flushed from MemStore to HFile for the region */ required uint64 last_flushed_sequence_id = 1; + + /** the last WAL sequence id flushed from MemStore to HFile for stores of the region */ + repeated StoreSequenceId store_last_flushed_sequence_id = 2; } message RegionStateTransition { diff --git a/hbase-protocol/src/main/protobuf/WAL.proto b/hbase-protocol/src/main/protobuf/WAL.proto index 169a9b2c3e4..9853e36467f 100644 --- a/hbase-protocol/src/main/protobuf/WAL.proto +++ b/hbase-protocol/src/main/protobuf/WAL.proto @@ -109,6 +109,7 @@ message FlushDescriptor { START_FLUSH = 0; COMMIT_FLUSH = 1; ABORT_FLUSH = 2; + CANNOT_FLUSH = 3; // marker for indicating that a flush has been requested but cannot complete } message StoreFlushDescriptor { @@ -122,6 +123,7 @@ message FlushDescriptor { required bytes encoded_region_name = 3; optional uint64 flush_sequence_number = 4; repeated StoreFlushDescriptor store_flushes = 5; + optional bytes region_name = 6; // full region name } message StoreDescriptor { @@ -155,6 +157,7 @@ message RegionEventDescriptor { optional uint64 log_sequence_number = 4; repeated StoreDescriptor stores = 5; optional ServerName server = 6; // Server who opened the region + optional bytes region_name = 7; // full region name } /** diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto index bac881bc1f2..617e2cf0f3c 100644 --- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto +++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto @@ -152,20 +152,3 @@ message TableLock { optional string purpose = 5; optional int64 create_time = 6; } - -/** - * sequence Id of a store - */ -message StoreSequenceId { - required bytes family_name = 1; - required uint64 sequence_id = 2; -} - -/** - * contains a sequence id of a region which should be the minimum of its store sequence ids and - * list sequence ids of the region's stores - */ -message RegionStoreSequenceIds { - required uint64 last_flushed_sequence_id = 1; - repeated StoreSequenceId store_sequence_id = 2; -} diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml index 5fd7319afc8..e82f029096d 100644 --- a/hbase-rest/pom.xml +++ b/hbase-rest/pom.xml @@ -147,6 +147,36 @@ + + + + + org.eclipse.m2e + lifecycle-mapping + 1.0.0 + + + + + + org.apache.maven.plugins + maven-antrun-plugin + [1.6,) + + run + + + + + + + + + + + + diff --git a/hbase-rest/src/main/asciidoc/.gitignore b/hbase-rest/src/main/asciidoc/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index bb52fdb914b..370a083b980 100644 --- 
a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -127,8 +127,7 @@ public class RESTServer implements Constants { // check for user-defined port setting, if so override the conf if (commandLine != null && commandLine.hasOption("port")) { String val = commandLine.getOptionValue("port"); - servlet.getConfiguration() - .setInt("hbase.rest.port", Integer.valueOf(val)); + servlet.getConfiguration().setInt("hbase.rest.port", Integer.parseInt(val)); LOG.debug("port set to " + val); } @@ -141,8 +140,7 @@ public class RESTServer implements Constants { // check for user-defined info server port setting, if so override the conf if (commandLine != null && commandLine.hasOption("infoport")) { String val = commandLine.getOptionValue("infoport"); - servlet.getConfiguration() - .setInt("hbase.rest.info.port", Integer.valueOf(val)); + servlet.getConfiguration().setInt("hbase.rest.info.port", Integer.parseInt(val)); LOG.debug("Web UI port set to " + val); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java index bb93bc8f930..0ecaf5a87d4 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java @@ -22,7 +22,7 @@ import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.ParseFilter; import org.apache.hadoop.hbase.security.UserProvider; @@ -101,7 +101,7 @@ public class RESTServlet implements Constants { } } - HBaseAdmin getAdmin() throws IOException { + Admin getAdmin() throws IOException { return connectionCache.getAdmin(); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java index 2ce8ede13e4..b5ecb351526 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java @@ -56,7 +56,8 @@ public class RESTServletContainer extends ServletContainer { if (!servlet.supportsProxyuser()) { throw new ServletException("Support for proxyuser is not configured"); } - UserGroupInformation ugi = servlet.getRealUser(); + // Authenticated remote user is attempting to do 'doAs' proxy user. 
+ UserGroupInformation ugi = UserGroupInformation.createRemoteUser(request.getRemoteUser()); // create and attempt to authorize a proxy user (the client is attempting // to do proxy user) ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, ugi); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java index 001c6b5718f..48721bb545a 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java @@ -33,6 +33,7 @@ import javax.ws.rs.core.Response.ResponseBuilder; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; @@ -40,7 +41,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.MetaScanner; import org.apache.hadoop.hbase.rest.model.TableInfoModel; import org.apache.hadoop.hbase.rest.model.TableRegionModel; @@ -80,7 +80,9 @@ public class RegionsResource extends ResourceBase { TableInfoModel model = new TableInfoModel(tableName.getNameAsString()); Connection connection = ConnectionFactory.createConnection(servlet.getConfiguration()); - Map regions = MetaScanner.allTableRegions(connection, tableName); + @SuppressWarnings("deprecation") + Map regions = MetaTableAccessor + .allTableRegions(connection, tableName); connection.close(); for (Map.Entry e: regions.entrySet()) { HRegionInfo hri = e.getKey(); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java index dad5a32d3e3..ff1345cd615 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java @@ -76,7 +76,7 @@ public class RowResource extends ResourceBase { this.tableResource = tableResource; this.rowspec = new RowSpec(rowspec); if (versions != null) { - this.rowspec.setMaxVersions(Integer.valueOf(versions)); + this.rowspec.setMaxVersions(Integer.parseInt(versions)); } this.check = check; } @@ -271,7 +271,7 @@ public class RowResource extends ResourceBase { } vals = headers.getRequestHeader("X-Timestamp"); if (vals != null && !vals.isEmpty()) { - timestamp = Long.valueOf(vals.get(0)); + timestamp = Long.parseLong(vals.get(0)); } if (column == null) { servlet.getMetrics().incrementFailedPutRequests(1); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java index b6c1ca8ccc6..cc51c85965f 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java @@ -164,7 +164,7 @@ public class RowSpec { i++; } try { - time0 = Long.valueOf(URLDecoder.decode(stamp.toString(), + time0 = Long.parseLong(URLDecoder.decode(stamp.toString(), HConstants.UTF8_ENCODING)); } catch (NumberFormatException e) { throw new IllegalArgumentException(e); @@ -177,7 +177,7 @@ public class RowSpec { i++; } try { - time1 = 
Long.valueOf(URLDecoder.decode(stamp.toString(), + time1 = Long.parseLong(URLDecoder.decode(stamp.toString(), HConstants.UTF8_ENCODING)); } catch (NumberFormatException e) { throw new IllegalArgumentException(e); @@ -245,7 +245,7 @@ public class RowSpec { } sb.append(c); } - maxVersions = Integer.valueOf(sb.toString()); + maxVersions = Integer.parseInt(sb.toString()); } break; case 'n': { StringBuilder sb = new StringBuilder(); @@ -257,7 +257,7 @@ public class RowSpec { } sb.append(c); } - maxValues = Integer.valueOf(sb.toString()); + maxValues = Integer.parseInt(sb.toString()); } break; default: throw new IllegalArgumentException("unknown parameter '" + c + "'"); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java index 45dd9ee7af8..9826b67a99e 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java @@ -37,18 +37,17 @@ import javax.xml.namespace.QName; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel; import org.apache.hadoop.hbase.rest.model.TableSchemaModel; -import org.apache.hadoop.hbase.util.Bytes; @InterfaceAudience.Private public class SchemaResource extends ResourceBase { @@ -103,15 +102,15 @@ public class SchemaResource extends ResourceBase { } } - private Response replace(final byte[] name, final TableSchemaModel model, - final UriInfo uriInfo, final HBaseAdmin admin) { + private Response replace(final TableName name, final TableSchemaModel model, + final UriInfo uriInfo, final Admin admin) { if (servlet.isReadOnly()) { return Response.status(Response.Status.FORBIDDEN) .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) .build(); } try { - HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name)); + HTableDescriptor htd = new HTableDescriptor(name); for (Map.Entry e: model.getAny().entrySet()) { htd.setValue(e.getKey().getLocalPart(), e.getValue().toString()); } @@ -143,8 +142,8 @@ public class SchemaResource extends ResourceBase { } } - private Response update(final byte[] name, final TableSchemaModel model, - final UriInfo uriInfo, final HBaseAdmin admin) { + private Response update(final TableName name, final TableSchemaModel model, + final UriInfo uriInfo, final Admin admin) { if (servlet.isReadOnly()) { return Response.status(Response.Status.FORBIDDEN) .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) @@ -170,7 +169,7 @@ public class SchemaResource extends ResourceBase { .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) .build(); } finally { - admin.enableTable(tableResource.getName()); + admin.enableTable(TableName.valueOf(tableResource.getName())); } servlet.getMetrics().incrementSucessfulPutRequests(1); return Response.ok().build(); @@ -183,8 +182,8 @@ public class SchemaResource 
extends ResourceBase { private Response update(final TableSchemaModel model, final boolean replace, final UriInfo uriInfo) { try { - byte[] name = Bytes.toBytes(tableResource.getName()); - HBaseAdmin admin = servlet.getAdmin(); + TableName name = TableName.valueOf(tableResource.getName()); + Admin admin = servlet.getAdmin(); if (replace || !admin.tableExists(name)) { return replace(name, model, uriInfo, admin); } else { @@ -233,11 +232,11 @@ public class SchemaResource extends ResourceBase { .entity("Forbidden" + CRLF).build(); } try { - HBaseAdmin admin = servlet.getAdmin(); + Admin admin = servlet.getAdmin(); try { - admin.disableTable(tableResource.getName()); + admin.disableTable(TableName.valueOf(tableResource.getName())); } catch (TableNotEnabledException e) { /* this is what we want anyway */ } - admin.deleteTable(tableResource.getName()); + admin.deleteTable(TableName.valueOf(tableResource.getName())); servlet.getMetrics().incrementSucessfulDeleteRequests(1); return Response.ok().build(); } catch (Exception e) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java index caf14319fa3..556425ff69b 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java @@ -34,6 +34,7 @@ import javax.ws.rs.core.UriInfo; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; @@ -69,7 +70,7 @@ public class TableResource extends ResourceBase { * @throws IOException */ boolean exists() throws IOException { - return servlet.getAdmin().tableExists(table); + return servlet.getAdmin().tableExists(TableName.valueOf(table)); } @Path("exists") diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java index 65bf509f9f1..0300ea2c9c8 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java @@ -19,14 +19,18 @@ package org.apache.hadoop.hbase.rest.client; -import com.google.protobuf.Descriptors; -import com.google.protobuf.Message; -import com.google.protobuf.Service; -import com.google.protobuf.ServiceException; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -35,11 +39,12 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import 
org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -47,6 +52,7 @@ import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; @@ -61,22 +67,17 @@ import org.apache.hadoop.hbase.rest.model.TableSchemaModel; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.StringUtils; -import java.io.IOException; -import java.io.InterruptedIOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; +import com.google.protobuf.Descriptors; +import com.google.protobuf.Message; +import com.google.protobuf.Service; +import com.google.protobuf.ServiceException; /** * HTable interface to remote tables accessed via REST gateway */ @InterfaceAudience.Public @InterfaceStability.Stable -public class RemoteHTable implements HTableInterface { +public class RemoteHTable implements Table { private static final Log LOG = LogFactory.getLog(RemoteHTable.class); @@ -249,10 +250,12 @@ public class RemoteHTable implements HTableInterface { return TableName.valueOf(name); } + @Override public Configuration getConfiguration() { return conf; } + @Override public HTableDescriptor getTableDescriptor() throws IOException { StringBuilder sb = new StringBuilder(); sb.append('/'); @@ -281,10 +284,12 @@ public class RemoteHTable implements HTableInterface { throw new IOException("schema request timed out"); } + @Override public void close() throws IOException { client.shutdown(); } + @Override public Result get(Get get) throws IOException { TimeRange range = get.getTimeRange(); String spec = buildRowSpec(get.getRow(), get.getFamilyMap(), @@ -303,6 +308,7 @@ public class RemoteHTable implements HTableInterface { } } + @Override public Result[] get(List gets) throws IOException { byte[][] rows = new byte[gets.size()][]; int maxVersions = 1; @@ -359,6 +365,7 @@ public class RemoteHTable implements HTableInterface { throw new IOException("get request timed out"); } + @Override public boolean exists(Get get) throws IOException { LOG.warn("exists() is really get(), just use get()"); Result result = get(get); @@ -369,6 +376,7 @@ public class RemoteHTable implements HTableInterface { * exists(List) is really a list of get() calls. Just use get(). 
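Because RemoteHTable now implements Table rather than the deprecated HTableInterface, a REST-backed table can be handed to any code written against the generic Table interface. A small usage sketch; the gateway address and table name are hypothetical:

  // Sketch: use the REST client's RemoteHTable through the plain Table interface.
  import org.apache.hadoop.hbase.client.Get;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.rest.client.Client;
  import org.apache.hadoop.hbase.rest.client.Cluster;
  import org.apache.hadoop.hbase.rest.client.RemoteHTable;
  import org.apache.hadoop.hbase.util.Bytes;

  public class RemoteTableExample {
    public static void main(String[] args) throws Exception {
      Cluster cluster = new Cluster();
      cluster.add("localhost", 8080);                    // hypothetical REST gateway endpoint
      Client client = new Client(cluster);
      Table table = new RemoteHTable(client, "example"); // hypothetical table name
      try {
        Result r = table.get(new Get(Bytes.toBytes("row1")));
        System.out.println("value: " + Bytes.toString(r.value()));
      } finally {
        table.close();                                   // shuts down the underlying REST client
      }
    }
  }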
* @param gets list of Get to test for the existence */ + @Override public boolean[] existsAll(List gets) throws IOException { LOG.warn("exists(List) is really list of get() calls, just use get()"); boolean[] results = new boolean[gets.size()]; @@ -388,6 +396,7 @@ public class RemoteHTable implements HTableInterface { return objectResults; } + @Override public void put(Put put) throws IOException { CellSetModel model = buildModelFromPut(put); StringBuilder sb = new StringBuilder(); @@ -416,6 +425,7 @@ public class RemoteHTable implements HTableInterface { throw new IOException("put request timed out"); } + @Override public void put(List puts) throws IOException { // this is a trick: The gateway accepts multiple rows in a cell set and // ignores the row specification in the URI @@ -471,6 +481,7 @@ public class RemoteHTable implements HTableInterface { throw new IOException("multiput request timed out"); } + @Override public void delete(Delete delete) throws IOException { String spec = buildRowSpec(delete.getRow(), delete.getFamilyCellMap(), delete.getTimeStamp(), delete.getTimeStamp(), 1); @@ -494,6 +505,7 @@ public class RemoteHTable implements HTableInterface { throw new IOException("delete request timed out"); } + @Override public void delete(List deletes) throws IOException { for (Delete delete: deletes) { delete(delete); @@ -631,19 +643,21 @@ public class RemoteHTable implements HTableInterface { LOG.warn(StringUtils.stringifyException(e)); } } - } + @Override public ResultScanner getScanner(Scan scan) throws IOException { return new Scanner(scan); } + @Override public ResultScanner getScanner(byte[] family) throws IOException { Scan scan = new Scan(); scan.addFamily(family); return new Scanner(scan); } + @Override public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException { Scan scan = new Scan(); @@ -659,6 +673,7 @@ public class RemoteHTable implements HTableInterface { throw new IOException("getRowOrBefore not supported"); } + @Override public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) throws IOException { // column to check-the-value @@ -695,11 +710,13 @@ public class RemoteHTable implements HTableInterface { throw new IOException("checkAndPut request timed out"); } + @Override public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, byte[] value, Put put) throws IOException { throw new IOException("checkAndPut for non-equal comparison not implemented"); } + @Override public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, Delete delete) throws IOException { Put put = new Put(row); @@ -736,24 +753,29 @@ public class RemoteHTable implements HTableInterface { throw new IOException("checkAndDelete request timed out"); } + @Override public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, byte[] value, Delete delete) throws IOException { throw new IOException("checkAndDelete for non-equal comparison not implemented"); } + @Override public Result increment(Increment increment) throws IOException { throw new IOException("Increment not supported"); } + @Override public Result append(Append append) throws IOException { throw new IOException("Append not supported"); } + @Override public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) throws IOException { throw new IOException("incrementColumnValue not supported"); } + @Override public long incrementColumnValue(byte[] row, byte[] 
family, byte[] qualifier, long amount, Durability durability) throws IOException { throw new IOException("incrementColumnValue not supported"); @@ -805,21 +827,6 @@ public class RemoteHTable implements HTableInterface { throw new IOException("atomicMutation not supported"); } - @Override - public void setAutoFlush(boolean autoFlush) { - throw new UnsupportedOperationException("setAutoFlush not implemented"); - } - - @Override - public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) { - throw new UnsupportedOperationException("setAutoFlush not implemented"); - } - - @Override - public void setAutoFlushTo(boolean autoFlush) { - throw new UnsupportedOperationException("setAutoFlushTo not implemented"); - } - @Override public long getWriteBufferSize() { throw new UnsupportedOperationException("getWriteBufferSize not implemented"); @@ -830,12 +837,6 @@ public class RemoteHTable implements HTableInterface { throw new IOException("setWriteBufferSize not supported"); } - @Override - public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, - long amount, boolean writeToWAL) throws IOException { - throw new IOException("incrementColumnValue not supported"); - } - @Override public Map batchCoprocessorService( Descriptors.MethodDescriptor method, Message request, diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java index ba0eed80d21..8562cdef8fd 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java @@ -138,7 +138,7 @@ public class ColumnSchemaModel implements Serializable { public boolean __getBlockcache() { Object o = attrs.get(BLOCKCACHE); return o != null ? - Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKCACHE; + Boolean.parseBoolean(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKCACHE; } /** @@ -147,7 +147,7 @@ public class ColumnSchemaModel implements Serializable { public int __getBlocksize() { Object o = attrs.get(BLOCKSIZE); return o != null ? - Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKSIZE; + Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKSIZE; } /** @@ -172,7 +172,7 @@ public class ColumnSchemaModel implements Serializable { public boolean __getInMemory() { Object o = attrs.get(IN_MEMORY); return o != null ? - Boolean.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_IN_MEMORY; + Boolean.parseBoolean(o.toString()) : HColumnDescriptor.DEFAULT_IN_MEMORY; } /** @@ -181,7 +181,7 @@ public class ColumnSchemaModel implements Serializable { public int __getTTL() { Object o = attrs.get(TTL); return o != null ? - Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_TTL; + Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_TTL; } /** @@ -190,7 +190,7 @@ public class ColumnSchemaModel implements Serializable { public int __getVersions() { Object o = attrs.get(VERSIONS); return o != null ? 
- Integer.valueOf(o.toString()) : HColumnDescriptor.DEFAULT_VERSIONS; + Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_VERSIONS; } /** diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java index 784f7e66ec4..25a6de32e56 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java @@ -413,7 +413,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { } } break; case PageFilter: - filter = new PageFilter(Long.valueOf(value)); + filter = new PageFilter(Long.parseLong(value)); break; case PrefixFilter: filter = new PrefixFilter(Base64.decode(value)); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java index 3b044e73b1e..2caec662b22 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java @@ -106,7 +106,9 @@ public class StorageClusterStatusModel /** * Represents a region hosted on a region server. */ - public static class Region { + public static class Region implements Serializable { + private static final long serialVersionUID = -1326683840086398193L; + private byte[] name; private int stores; private int storefiles; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java index d9b2b659208..89fe12c62ef 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableRegionModel.java @@ -142,7 +142,7 @@ public class TableRegionModel implements Serializable { this.startKey = Bytes.toBytes(split[1]); String tail = split[2]; split = tail.split("\\."); - id = Long.valueOf(split[0]); + id = Long.parseLong(split[0]); } /** diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java index 9e9fe4763ee..593c3ab3aee 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/TableSchemaModel.java @@ -221,7 +221,7 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { */ public boolean __getIsMeta() { Object o = attrs.get(IS_META); - return o != null ? Boolean.valueOf(o.toString()) : false; + return o != null ? Boolean.parseBoolean(o.toString()) : false; } /** @@ -229,7 +229,7 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { */ public boolean __getIsRoot() { Object o = attrs.get(IS_ROOT); - return o != null ? Boolean.valueOf(o.toString()) : false; + return o != null ? Boolean.parseBoolean(o.toString()) : false; } /** @@ -237,8 +237,7 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { */ public boolean __getReadOnly() { Object o = attrs.get(READONLY); - return o != null ? - Boolean.valueOf(o.toString()) : HTableDescriptor.DEFAULT_READONLY; + return o != null ? 
Boolean.parseBoolean(o.toString()) : HTableDescriptor.DEFAULT_READONLY; } /** @@ -285,12 +284,10 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { familyBuilder.addAttrs(attrBuilder); } if (familyAttrs.containsKey(TTL)) { - familyBuilder.setTtl( - Integer.valueOf(familyAttrs.get(TTL).toString())); + familyBuilder.setTtl(Integer.parseInt(familyAttrs.get(TTL).toString())); } if (familyAttrs.containsKey(VERSIONS)) { - familyBuilder.setMaxVersions( - Integer.valueOf(familyAttrs.get(VERSIONS).toString())); + familyBuilder.setMaxVersions(Integer.parseInt(familyAttrs.get(VERSIONS).toString())); } if (familyAttrs.containsKey(COMPRESSION)) { familyBuilder.setCompression(familyAttrs.get(COMPRESSION).toString()); @@ -298,8 +295,7 @@ public class TableSchemaModel implements Serializable, ProtobufMessageHandler { builder.addColumns(familyBuilder); } if (attrs.containsKey(READONLY)) { - builder.setReadOnly( - Boolean.valueOf(attrs.get(READONLY).toString())); + builder.setReadOnly(Boolean.parseBoolean(attrs.get(READONLY).toString())); } return builder.build().toByteArray(); } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java index 876b089d40c..0e74b46b4e6 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/RowResourceBase.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.rest.client.Client; import org.apache.hadoop.hbase.rest.client.Cluster; import org.apache.hadoop.hbase.rest.client.Response; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java index e4a322a3d75..42d355d8e03 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestGzipFilter.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.rest.client.Client; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java index 5bd8fc842b8..3acddc1aa30 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java @@ -35,7 +35,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Durability; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java 
b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java index 00c2049936e..5fdc631a183 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestStatusResource.java @@ -30,7 +30,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.rest.client.Client; import org.apache.hadoop.hbase.rest.client.Cluster; import org.apache.hadoop.hbase.rest.client.Response; diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java index 7cbb2a290b8..b0b8fef407d 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestTableResource.java @@ -19,36 +19,46 @@ package org.apache.hadoop.hbase.rest; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.io.ByteArrayInputStream; import java.io.IOException; import java.net.InetSocketAddress; +import java.util.ArrayList; import java.util.Iterator; -import java.util.Map; +import java.util.List; import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionLocator; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.rest.client.Client; import org.apache.hadoop.hbase.rest.client.Cluster; import org.apache.hadoop.hbase.rest.client.Response; -import org.apache.hadoop.hbase.rest.model.TableModel; import org.apache.hadoop.hbase.rest.model.TableInfoModel; import org.apache.hadoop.hbase.rest.model.TableListModel; +import org.apache.hadoop.hbase.rest.model.TableModel; import org.apache.hadoop.hbase.rest.model.TableRegionModel; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RestTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.StringUtils; - -import static org.junit.Assert.*; - import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -61,7 +71,7 @@ public class TestTableResource { private static TableName TABLE = TableName.valueOf("TestTableResource"); private static String COLUMN_FAMILY = "test"; private static String COLUMN = COLUMN_FAMILY + ":qualifier"; - private static Map regionMap; + private static List regionMap; private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static final 
HBaseRESTTestingUtility REST_TEST_UTIL = @@ -87,9 +97,9 @@ public class TestTableResource { HTableDescriptor htd = new HTableDescriptor(TABLE); htd.addFamily(new HColumnDescriptor(COLUMN_FAMILY)); admin.createTable(htd); - HTable table = (HTable) TEST_UTIL.getConnection().getTable(TABLE); byte[] k = new byte[3]; byte [][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(COLUMN)); + List puts = new ArrayList<>(); for (byte b1 = 'a'; b1 < 'z'; b1++) { for (byte b2 = 'a'; b2 < 'z'; b2++) { for (byte b3 = 'a'; b3 < 'z'; b3++) { @@ -99,13 +109,19 @@ public class TestTableResource { Put put = new Put(k); put.setDurability(Durability.SKIP_WAL); put.add(famAndQf[0], famAndQf[1], k); - table.put(put); + puts.add(put); } } } - table.flushCommits(); + Connection connection = TEST_UTIL.getConnection(); + + Table table = connection.getTable(TABLE); + table.put(puts); + table.close(); // get the initial layout (should just be one region) - Map m = table.getRegionLocations(); + + RegionLocator regionLocator = connection.getRegionLocator(TABLE); + List m = regionLocator.getAllRegionLocations(); assertEquals(m.size(), 1); // tell the master to split the table admin.split(TABLE); @@ -119,14 +135,14 @@ public class TestTableResource { LOG.warn(StringUtils.stringifyException(e)); } // check again - m = table.getRegionLocations(); + m = regionLocator.getAllRegionLocations(); } // should have two regions now assertEquals(m.size(), 2); regionMap = m; LOG.info("regions: " + regionMap); - table.close(); + regionLocator.close(); } @AfterClass @@ -156,15 +172,17 @@ public class TestTableResource { while (regions.hasNext()) { TableRegionModel region = regions.next(); boolean found = false; - for (Map.Entry e: regionMap.entrySet()) { - HRegionInfo hri = e.getKey(); + for (HRegionLocation e: regionMap) { + HRegionInfo hri = e.getRegionInfo(); String hriRegionName = hri.getRegionNameAsString(); String regionName = region.getName(); if (hriRegionName.equals(regionName)) { found = true; byte[] startKey = hri.getStartKey(); byte[] endKey = hri.getEndKey(); - InetSocketAddress sa = new InetSocketAddress(e.getValue().getHostname(), e.getValue().getPort()); + ServerName serverName = e.getServerName(); + InetSocketAddress sa = + new InetSocketAddress(serverName.getHostname(), serverName.getPort()); String location = sa.getHostName() + ":" + Integer.valueOf(sa.getPort()); assertEquals(hri.getRegionId(), region.getId()); diff --git a/hbase-rest/src/test/resources/log4j.properties b/hbase-rest/src/test/resources/log4j.properties index 6ee91efc3b2..13a95b4a673 100644 --- a/hbase-rest/src/test/resources/log4j.properties +++ b/hbase-rest/src/test/resources/log4j.properties @@ -63,4 +63,4 @@ log4j.logger.org.apache.hadoop.hbase=DEBUG log4j.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=ERROR log4j.org.apache.hadoop.metrics2.util.MBeans=ERROR # Enable this to get detailed connection error/retry logging. 
-# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE +# log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=TRACE diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index c9ba4dadfeb..4becc4027f6 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -53,20 +53,6 @@ - - maven-compiler-plugin - - - default-compile - - ${java.default.compiler} - true - - - - - org.apache.maven.plugins maven-site-plugin @@ -234,9 +220,9 @@ - + org.eclipse.m2e lifecycle-mapping @@ -276,6 +262,32 @@ + + + org.apache.maven.plugins + maven-dependency-plugin + [2.8,) + + build-classpath + + + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + [3.2,) + + compile + + + + + + @@ -302,6 +314,10 @@ org.apache.hbase hbase-protocol + + org.apache.hbase + hbase-procedure + org.apache.hbase hbase-client @@ -323,6 +339,12 @@ test-jar test + + org.apache.hbase + hbase-procedure + test-jar + test + commons-httpclient commons-httpclient @@ -464,6 +486,11 @@ io.netty netty-all + + net.spy + spymemcached + true + org.apache.htrace @@ -479,6 +506,16 @@ hamcrest-core test + + org.apache.hadoop + hadoop-minikdc + test + + + org.bouncycastle + bcprov-jdk16 + test + diff --git a/hbase-server/src/main/asciidoc/.gitignore b/hbase-server/src/main/asciidoc/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon index c83f2952287..02bbbd5a89d 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon @@ -42,7 +42,6 @@ org.apache.hadoop.hbase.NamespaceDescriptor; org.apache.hadoop.hbase.ServerLoad; org.apache.hadoop.hbase.ServerName; org.apache.hadoop.hbase.client.Admin; -org.apache.hadoop.hbase.client.HConnectionManager; org.apache.hadoop.hbase.HRegionInfo; org.apache.hadoop.hbase.master.RegionState; org.apache.hadoop.hbase.HTableDescriptor; @@ -80,14 +79,13 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); } for(int i = 0; i < zks.length; ++i) { - quorums.append(zks[i].trim()).append(","); + quorums.append(zks[i].trim()); - if ((i+1) % 4 == 0 && i != (zks.length - 1)) { + if (i != (zks.length - 1)) { quorums.append("
      "); } } - quorums.setLength(quorums.length() - 1); return quorums.toString(); } @@ -159,6 +157,13 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); re-enabled from the hbase shell by running the command 'catalogjanitor_switch true' + <%if !master.isBalancerOn() %> +
      + The Load Balancer is not enabled, which will eventually cause performance degradation + in HBase as Regions will not be distributed across all RegionServers. The balancer + is only expected to be disabled during rolling upgrade scenarios. +
      +

      Region Servers

      @@ -254,6 +259,11 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); <% formatZKString() %> Addresses of all registered ZK servers. For more, see zk dump. + + Zookeeper Base Path + <% master.getZooKeeper().getBaseZNode() %> + Root node of this cluster in ZK. + HBase Root Directory <% FSUtils.getRootDir(master.getConfiguration()).toString() %> @@ -294,6 +304,11 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); java.util.Arrays.toString(master.getMasterCoprocessors()) %> Coprocessors currently loaded by the master + + LoadBalancer + <% master.getLoadBalancerClassName() %> + LoadBalancer to be used in the Master +
      diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon index f063e740266..49addc79c70 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon @@ -34,7 +34,6 @@ HMaster master; org.apache.hadoop.hbase.ServerLoad; org.apache.hadoop.hbase.ServerName; org.apache.hadoop.hbase.client.HBaseAdmin; - org.apache.hadoop.hbase.client.HConnectionManager; org.apache.hadoop.hbase.HTableDescriptor; org.apache.hadoop.hbase.HBaseConfiguration; diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon index e4ff70fd1ee..04191968740 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/BlockCacheTmpl.jamon @@ -206,6 +206,38 @@ org.apache.hadoop.util.StringUtils; +<%def hits_tmpl> +<%args> + BlockCache bc; + + + Hits + <% String.format("%,d", bc.getStats().getHitCount()) %> + Number requests that were cache hits + + + Hits Caching + <% String.format("%,d", bc.getStats().getHitCachingCount()) %> + Cache hit block requests but only requests set to cache block if a miss + + + Misses + <% String.format("%,d", bc.getStats().getMissCount()) %> + Block requests that were cache misses but set to cache missed blocks + + + Misses Caching + <% String.format("%,d", bc.getStats().getMissCount()) %> + Block requests that were cache misses but only requests set to use block cache + + + Hit Ratio + <% String.format("%,.2f", bc.getStats().getHitRatio() * 100) %><% "%" %> + Hit Count divided by total requests count + + + + <%def bc_stats> <%args> CacheConfig cacheConfig; @@ -235,31 +267,7 @@ org.apache.hadoop.util.StringUtils; Number of blocks in block cache <& evictions_tmpl; bc = cacheConfig.getBlockCache(); &> - - Hits - <% String.format("%,d", cacheConfig.getBlockCache().getStats().getHitCount()) %> - Number requests that were cache hits - - - Hits Caching - <% String.format("%,d", cacheConfig.getBlockCache().getStats().getHitCachingCount()) %> - Cache hit block requests but only requests set to cache block if a miss - - - Misses - <% String.format("%,d", cacheConfig.getBlockCache().getStats().getMissCount()) %> - Block requests that were cache misses but set to cache missed blocks - - - Misses Caching - <% String.format("%,d", cacheConfig.getBlockCache().getStats().getMissCount()) %> - Block requests that were cache misses but only requests set to use block cache - - - Hit Ratio - <% String.format("%,.2f", cacheConfig.getBlockCache().getStats().getHitRatio() * 100) %><% "%" %> - Hit Count divided by total requests count - + <& hits_tmpl; bc = cacheConfig.getBlockCache(); &>

      If the block cache is made up of more than one cache -- i.e. an L1 and an L2 -- then the above are combined counts. Request count is the sum of hits and misses.
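For that combined view, the counters from each cache level are summed before the ratio is computed. A toy illustration of the arithmetic (the numbers are made up):

public final class CombinedHitRatio {
  static double hitRatio(long hits, long misses) {
    long requests = hits + misses;            // request count = hits + misses
    return requests == 0 ? 0.0 : (double) hits / requests;
  }

  public static void main(String[] args) {
    long l1Hits = 9000, l1Misses = 1000;      // e.g. an on-heap LruBlockCache
    long l2Hits = 400, l2Misses = 600;        // e.g. a BucketCache acting as L2
    System.out.printf("Combined Hit Ratio: %,.2f%%%n",
        hitRatio(l1Hits + l2Hits, l1Misses + l2Misses) * 100);
  }
}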

      @@ -349,7 +357,9 @@ are combined counts. Request count is sum of hits and misses.

      Size of DATA Blocks -<%if evictions %><& evictions_tmpl; bc = bc; &> +<& evictions_tmpl; bc = bc; &> +<& hits_tmpl; bc = bc; &> + <%if bucketCache %> Hits per Second diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon index 6ca8ec6dd72..9d219f585d4 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon @@ -26,7 +26,7 @@ org.apache.hadoop.hbase.regionserver.HRegionServer; org.apache.hadoop.hbase.util.Bytes; org.apache.hadoop.hbase.HRegionInfo; - org.apache.hadoop.hbase.regionserver.HRegion; + org.apache.hadoop.hbase.regionserver.Region; org.apache.hadoop.hbase.ServerName; org.apache.hadoop.hbase.HBaseConfiguration; org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -254,7 +254,7 @@ <%for HRegionInfo r: onlineRegions %> <%java> - HRegion region = regionServer.getFromOnlineRegions(r.getEncodedName()); + Region region = regionServer.getFromOnlineRegions(r.getEncodedName()); MetricsRegionWrapper mWrap = region == null ? null: region.getMetrics().getRegionWrapper(); diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon index e4e7e897e01..a86a4ea1cf4 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon @@ -76,12 +76,14 @@ java.lang.management.ManagementFactory; Requests Per Second Num. Regions Block locality + Block locality (Secondary replicas) Slow WAL Append Count <% String.format("%.0f", mWrap.getRequestsPerSecond()) %> <% mWrap.getNumOnlineRegions() %> <% mWrap.getPercentFileLocal() %> + <% mWrap.getPercentFileLocalSecondaryRegions() %> <% 0 %> diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java index eacba6f040b..1263318e789 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java @@ -168,12 +168,13 @@ public class LocalHBaseCluster { return addRegionServer(new Configuration(conf), this.regionThreads.size()); } + @SuppressWarnings("unchecked") public JVMClusterUtil.RegionServerThread addRegionServer( Configuration config, final int index) throws IOException { // Create each regionserver with its own Configuration instance so each has // its HConnection instance rather than share (see HBASE_INSTANCES down in - // the guts of HConnectionManager. + // the guts of ConnectionManager). // Also, create separate CoordinatedStateManager instance per Server. 
// This is special case when we have to have more than 1 CoordinatedStateManager @@ -181,8 +182,9 @@ public class LocalHBaseCluster { CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager(conf); JVMClusterUtil.RegionServerThread rst = - JVMClusterUtil.createRegionServerThread(config, cp, - this.regionServerClass, index); + JVMClusterUtil.createRegionServerThread(config, cp, (Class) conf + .getClass(HConstants.REGION_SERVER_IMPL, this.regionServerClass), index); + this.regionThreads.add(rst); return rst; } @@ -206,7 +208,7 @@ public class LocalHBaseCluster { throws IOException { // Create each master with its own Configuration instance so each has // its HConnection instance rather than share (see HBASE_INSTANCES down in - // the guts of HConnectionManager. + // the guts of ConnectionManager. // Also, create separate CoordinatedStateManager instance per Server. // This is special case when we have to have more than 1 CoordinatedStateManager @@ -261,6 +263,13 @@ public class LocalHBaseCluster { return liveServers; } + /** + * @return the Configuration used by this LocalHBaseCluster + */ + public Configuration getConfiguration() { + return this.conf; + } + /** * Wait for the specified region server to stop * Removes this thread from list of running threads. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java index 85f847114df..365c0b845cb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -28,7 +29,8 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; * Defines the set of shared functions implemented by HBase servers (Masters * and RegionServers). */ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving public interface Server extends Abortable, Stoppable { /** * Gets the configuration object for this server. 
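Two behavioural notes from the hunks above: LocalHBaseCluster#addRegionServer now resolves the regionserver class from HConstants.REGION_SERVER_IMPL in the passed Configuration, and a getConfiguration() accessor is added; Server is also opened up to coprocessors via LimitedPrivate(COPROC). A rough sketch of exercising the first change, under the assumption that a plain HBaseConfiguration is used (the custom class named in the comment is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.LocalHBaseCluster;

public class LocalClusterConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // With this patch addRegionServer() reads the implementation class from the conf,
    // so a custom regionserver could be plugged in before building the cluster, e.g.:
    // conf.set(HConstants.REGION_SERVER_IMPL, MyRegionServer.class.getName()); // hypothetical class
    LocalHBaseCluster cluster = new LocalHBaseCluster(conf, 1);
    // New accessor: the Configuration this cluster was constructed with.
    Configuration used = cluster.getConfiguration();
    System.out.println("regionserver impl: "
        + used.get(HConstants.REGION_SERVER_IMPL, "default HRegionServer"));
    cluster.shutdown();
  }
}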
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java index 2bab21b7acb..5809983385f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -30,8 +29,10 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.NoLimitScannerContext; import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.mortbay.log.Log; @@ -72,7 +73,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner { public Result next() throws IOException { values.clear(); - scanner.nextRaw(values, -1); // pass -1 as limit so that we see the whole row. + scanner.nextRaw(values, NoLimitScannerContext.getInstance()); if (values.isEmpty()) { //we are done return null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java index 1db90fb48ef..4ed8add9c85 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java @@ -24,7 +24,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.ConnectionManager.HConnectionImplementation; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionServerServices; @@ -40,15 +39,15 @@ import org.apache.hadoop.hbase.security.UserProvider; */ @InterfaceAudience.Private @InterfaceStability.Evolving -public class CoprocessorHConnection extends HConnectionImplementation { - private static final NonceGenerator NO_NONCE_GEN = new ConnectionManager.NoNonceGenerator(); +public class CoprocessorHConnection extends ConnectionImplementation { + private static final NonceGenerator NO_NONCE_GEN = new NoNonceGenerator(); /** - * Create an unmanaged {@link HConnection} based on the environment in which we are running the + * Create an {@link HConnection} based on the environment in which we are running the * coprocessor. The {@link HConnection} must be externally cleaned up (we bypass the usual HTable * cleanup mechanisms since we own everything). * @param env environment hosting the {@link HConnection} - * @return an unmanaged {@link HConnection}. + * @return instance of {@link HConnection}. 
* @throws IOException if we cannot create the connection */ public static ClusterConnection getConnectionForEnvironment(CoprocessorEnvironment env) @@ -61,7 +60,7 @@ public class CoprocessorHConnection extends HConnectionImplementation { return new CoprocessorHConnection((HRegionServer) services); } } - return ConnectionManager.createConnectionInternal(env.getConfiguration()); + return (ClusterConnection) ConnectionFactory.createConnection(env.getConfiguration()); } private final ServerName serverName; @@ -96,7 +95,7 @@ public class CoprocessorHConnection extends HConnectionImplementation { * @throws IOException if we cannot create the connection */ public CoprocessorHConnection(Configuration conf, HRegionServer server) throws IOException { - super(conf, false, null, UserProvider.instantiate(conf).getCurrent()); + super(conf, null, UserProvider.instantiate(conf).getCurrent()); this.server = server; this.serverName = server.getServerName(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java index c16b4c3be73..e3641c7d80a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/HTableWrapper.java @@ -58,23 +58,23 @@ import com.google.protobuf.ServiceException; */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Stable -public final class HTableWrapper implements HTableInterface { +public final class HTableWrapper implements Table { - private final HTableInterface table; + private final Table table; private ClusterConnection connection; - private final List openTables; + private final List openTables; /** * @param openTables External list of tables used for tracking wrappers. * @throws IOException */ - public static HTableInterface createWrapper(List openTables, + public static Table createWrapper(List
      openTables, TableName tableName, Environment env, ExecutorService pool) throws IOException { return new HTableWrapper(openTables, tableName, CoprocessorHConnection.getConnectionForEnvironment(env), pool); } - private HTableWrapper(List openTables, TableName tableName, + private HTableWrapper(List
      openTables, TableName tableName, ClusterConnection connection, ExecutorService pool) throws IOException { this.table = connection.getTable(tableName, pool); @@ -116,8 +116,7 @@ public final class HTableWrapper implements HTableInterface { } /** - * @deprecated in 0.99 since setting clearBufferOnFail is deprecated. Use - * {@link #setAutoFlushTo(boolean)}} instead. + * @deprecated in 0.99 since setting clearBufferOnFail is deprecated. */ @Deprecated public Result getRowOrBefore(byte[] row, byte[] family) @@ -215,14 +214,6 @@ public final class HTableWrapper implements HTableInterface { return table.increment(increment); } - public void flushCommits() throws IOException { - table.flushCommits(); - } - - public boolean isAutoFlush() { - return table.isAutoFlush(); - } - public ResultScanner getScanner(Scan scan) throws IOException { return table.getScanner(scan); } @@ -240,11 +231,6 @@ public final class HTableWrapper implements HTableInterface { return table.getTableDescriptor(); } - @Override - public byte[] getTableName() { - return table.getTableName(); - } - @Override public TableName getName() { return table.getName(); @@ -316,30 +302,6 @@ public final class HTableWrapper implements HTableInterface { table.mutateRow(rm); } - @Override - public void setAutoFlush(boolean autoFlush) { - table.setAutoFlush(autoFlush); - } - - @Override - public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) { - setAutoFlush(autoFlush); - if (!autoFlush && !clearBufferOnFail) { - // We don't support his combination. In HTable, the implementation is this: - // - // this.clearBufferOnFail = autoFlush || clearBufferOnFail - // - // So if autoFlush == false and clearBufferOnFail is false, that is not supported in - // the new Table Interface so just throwing UnsupportedOperationException here. - throw new UnsupportedOperationException("Can't do this via wrapper"); - } - } - - @Override - public void setAutoFlushTo(boolean autoFlush) { - table.setAutoFlushTo(autoFlush); - } - @Override public long getWriteBufferSize() { return table.getWriteBufferSize(); @@ -350,13 +312,6 @@ public final class HTableWrapper implements HTableInterface { table.setWriteBufferSize(writeBufferSize); } - @Override - public long incrementColumnValue(byte[] row, byte[] family, - byte[] qualifier, long amount, boolean writeToWAL) throws IOException { - return table.incrementColumnValue(row, family, qualifier, amount, - writeToWAL? 
Durability.USE_DEFAULT: Durability.SKIP_WAL); - } - @Override public Map batchCoprocessorService( MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java index baf2aa6112e..a371e3eccaf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java @@ -26,14 +26,14 @@ import java.util.UUID; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java index 707850d4f87..82f00b36c3c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java @@ -20,12 +20,12 @@ package org.apache.hadoop.hbase.coordination; import java.io.IOException; import java.util.concurrent.atomic.AtomicLong; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.SplitLogTask; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.SplitLogWorker; import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java index 694ccff980c..0abbd2f0c5f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java @@ -27,13 +27,14 @@ import static org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.S import java.io.IOException; import java.io.InterruptedIOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Set; import java.util.concurrent.ConcurrentMap; import org.apache.commons.logging.Log; 
import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CoordinatedStateManager; @@ -43,17 +44,17 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogCounters; import org.apache.hadoop.hbase.SplitLogTask; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination.TaskFinisher.Status; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective; import org.apache.hadoop.hbase.master.SplitLogManager.Task; import org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.wal.DefaultWALProvider; import org.apache.hadoop.hbase.wal.WALSplitter; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; @@ -150,7 +151,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements if (tasks != null) { int listSize = tasks.size(); for (int i = 0; i < listSize; i++) { - if (!ZKSplitLog.isRescanNode(watcher, tasks.get(i))) { + if (!ZKSplitLog.isRescanNode(tasks.get(i))) { count++; } } @@ -302,7 +303,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements if (tasks != null) { int listSize = tasks.size(); for (int i = 0; i < listSize; i++) { - if (!ZKSplitLog.isRescanNode(watcher, tasks.get(i))) { + if (!ZKSplitLog.isRescanNode(tasks.get(i))) { count++; } } @@ -619,7 +620,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements try { long lastSequenceId = this.details.getMaster().getServerManager() - .getLastFlushedSequenceId(regionEncodeName.getBytes()); + .getLastFlushedSequenceId(regionEncodeName.getBytes()).getLastFlushedSequenceId(); /* * znode layout: .../region_id[last known flushed sequence id]/failed server[last known @@ -763,6 +764,21 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements return this.recoveryMode == RecoveryMode.LOG_SPLITTING; } + private List listSplitLogTasks() throws KeeperException { + List taskOrRescanList = ZKUtil.listChildrenNoWatch(watcher, watcher.splitLogZNode); + if (taskOrRescanList == null || taskOrRescanList.isEmpty()) { + return Collections. 
emptyList(); + } + List taskList = new ArrayList(); + for (String taskOrRescan : taskOrRescanList) { + // Remove rescan nodes + if (!ZKSplitLog.isRescanNode(taskOrRescan)) { + taskList.add(taskOrRescan); + } + } + return taskList; + } + /** * This function is to set recovery mode from outstanding split log tasks from before or current * configuration setting @@ -801,8 +817,8 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements } if (previousRecoveryMode == RecoveryMode.UNKNOWN) { // Secondly check if there are outstanding split log task - List tasks = ZKUtil.listChildrenNoWatch(watcher, watcher.splitLogZNode); - if (tasks != null && !tasks.isEmpty()) { + List tasks = listSplitLogTasks(); + if (!tasks.isEmpty()) { hasSplitLogTask = true; if (isForInitialization) { // during initialization, try to get recovery mode from splitlogtask diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java index 9ea6bd70c1d..637920bd6cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java @@ -30,7 +30,6 @@ import org.apache.commons.lang.math.RandomUtils; import org.apache.commons.lang.mutable.MutableInt; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -38,17 +37,19 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogCounters; import org.apache.hadoop.hbase.SplitLogTask; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.SplitLogWorker; import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor; +import org.apache.hadoop.hbase.regionserver.handler.FinishRegionRecoveringHandler; import org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler; -import org.apache.hadoop.hbase.wal.DefaultWALProvider; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.wal.DefaultWALProvider; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener; @@ -444,7 +445,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements taskReadyLock.wait(checkInterval); if (server != null) { // check to see if we have stale recovering regions in our internal memory state - Map recoveringRegions = server.getRecoveringRegions(); + Map recoveringRegions = server.getRecoveringRegions(); if 
(!recoveringRegions.isEmpty()) { // Make a local copy to prevent ConcurrentModificationException when other threads // modify recoveringRegions @@ -455,11 +456,8 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements String nodePath = ZKUtil.joinZNode(watcher.recoveringRegionsZNode, region); try { if (ZKUtil.checkExists(watcher, nodePath) == -1) { - HRegion r = recoveringRegions.remove(region); - if (r != null) { - r.setRecovering(false); - } - LOG.debug("Mark recovering region:" + region + " up."); + server.getExecutorService().submit( + new FinishRegionRecoveringHandler(server, region, nodePath)); } else { // current check is a defensive(or redundant) mechanism to prevent us from // having stale recovering regions in our internal RS memory state while @@ -583,7 +581,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements * Next part is related to WALSplitterHandler */ /** - * endTask() can fail and the only way to recover out of it is for the + * endTask() can fail and the only way to recover out of it is for the * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node. * @param slt * @param ctr diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java index 1db5671fcfa..81c933bbd01 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java @@ -26,10 +26,10 @@ import java.util.NavigableSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -114,7 +114,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { } } log.info("Maximum from this region is " - + env.getRegion().getRegionNameAsString() + ": " + max); + + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + max); done.run(response); } @@ -167,7 +167,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { } } log.info("Minimum from this region is " - + env.getRegion().getRegionNameAsString() + ": " + min); + + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + min); done.run(response); } @@ -222,7 +222,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { } } log.debug("Sum from this region is " - + env.getRegion().getRegionNameAsString() + ": " + sum); + + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + sum); done.run(response); } @@ -273,7 +273,7 @@ extends AggregateService implements CoprocessorService, Coprocessor { } } log.info("Row counter from this region is " - + env.getRegion().getRegionNameAsString() + ": " + counter); + + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + counter); done.run(response); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java index 
215ff16696b..9e0cb9b63d8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java @@ -42,11 +42,11 @@ import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.regionserver.DeleteTracker; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.HRegion.Operation; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.KeyValueScanner; import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.regionserver.Store; @@ -152,7 +152,7 @@ public abstract class BaseRegionObserver implements RegionObserver { } @Override - public void postSplit(ObserverContext e, HRegion l, HRegion r) + public void postSplit(ObserverContext e, Region l, Region r) throws IOException { } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionServerObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionServerObserver.java index 1f34f884e67..9fc130f66d0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionServerObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionServerObserver.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; /** @@ -48,28 +48,28 @@ public class BaseRegionServerObserver implements RegionServerObserver { public void stop(CoprocessorEnvironment env) throws IOException { } @Override - public void preMerge(ObserverContext ctx, HRegion regionA, - HRegion regionB) throws IOException { } + public void preMerge(ObserverContext ctx, Region regionA, + Region regionB) throws IOException { } @Override - public void postMerge(ObserverContext c, HRegion regionA, - HRegion regionB, HRegion mergedRegion) throws IOException { } + public void postMerge(ObserverContext c, Region regionA, + Region regionB, Region mergedRegion) throws IOException { } @Override public void preMergeCommit(ObserverContext ctx, - HRegion regionA, HRegion regionB, List metaEntries) throws IOException { } + Region regionA, Region regionB, List metaEntries) throws IOException { } @Override public void postMergeCommit(ObserverContext ctx, - HRegion regionA, HRegion regionB, HRegion mergedRegion) throws IOException { } + Region regionA, Region regionB, Region mergedRegion) throws IOException { } @Override public void preRollBackMerge(ObserverContext ctx, - HRegion regionA, HRegion regionB) throws IOException { } + Region regionA, Region regionB) throws IOException { } @Override public void postRollBackMerge(ObserverContext ctx, - HRegion regionA, HRegion regionB) throws IOException { } + Region regionA, Region regionB) throws 
IOException { } @Override public void preRollWALWriterRequest(ObserverContext ctx) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java index 7b841aab74a..ab5fc78c891 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRowProcessorEndpoint.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.protobuf.ResponseConverter; import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.ProcessRequest; import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.ProcessResponse; import org.apache.hadoop.hbase.protobuf.generated.RowProcessorProtos.RowProcessorService; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RowProcessor; import com.google.protobuf.ByteString; @@ -42,7 +42,7 @@ import com.google.protobuf.Service; /** * This class demonstrates how to implement atomic read-modify-writes - * using {@link HRegion#processRowsWithLocks} and Coprocessor endpoints. + * using {@link Region#processRowsWithLocks} and Coprocessor endpoints. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving @@ -50,7 +50,7 @@ public abstract class BaseRowProcessorEndpoint processor = constructRowProcessorFromRequest(request); - HRegion region = env.getRegion(); + Region region = env.getRegion(); long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE; long nonce = request.hasNonce() ? request.getNonce() : HConstants.NO_NONCE; region.processRowsWithLocks(processor, nonceGroup, nonce); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java index eeb941ab268..b047d339756 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java @@ -46,8 +46,8 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.HTableWrapper; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.util.CoprocessorClassLoader; import org.apache.hadoop.hbase.util.SortedCopyOnWriteSet; import org.apache.hadoop.hbase.util.VersionInfo; @@ -73,6 +73,11 @@ public abstract class CoprocessorHost { "hbase.coprocessor.wal.classes"; public static final String ABORT_ON_ERROR_KEY = "hbase.coprocessor.abortonerror"; public static final boolean DEFAULT_ABORT_ON_ERROR = true; + public static final String COPROCESSORS_ENABLED_CONF_KEY = "hbase.coprocessor.enabled"; + public static final boolean DEFAULT_COPROCESSORS_ENABLED = true; + public static final String USER_COPROCESSORS_ENABLED_CONF_KEY = + "hbase.coprocessor.user.enabled"; + public static final boolean DEFAULT_USER_COPROCESSORS_ENABLED = true; private static final Log LOG = LogFactory.getLog(CoprocessorHost.class); protected Abortable abortable; @@ -123,6 +128,12 @@ public abstract class CoprocessorHost { * Called by constructor. 
*/ protected void loadSystemCoprocessors(Configuration conf, String confKey) { + boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, + DEFAULT_COPROCESSORS_ENABLED); + if (!coprocessorsEnabled) { + return; + } + Class implClass = null; // load default coprocessors from configure file @@ -366,8 +377,8 @@ public abstract class CoprocessorHost { /** Current coprocessor state */ Coprocessor.State state = Coprocessor.State.UNINSTALLED; /** Accounting for tables opened by the coprocessor */ - protected List openTables = - Collections.synchronizedList(new ArrayList()); + protected List
      openTables = + Collections.synchronizedList(new ArrayList
      ()); private int seq; private Configuration conf; private ClassLoader classLoader; @@ -427,7 +438,7 @@ public abstract class CoprocessorHost { " because not active (state="+state.toString()+")"); } // clean up any table references - for (HTableInterface table: openTables) { + for (Table table: openTables) { try { ((HTableWrapper)table).internalClose(); } catch (IOException e) { @@ -482,7 +493,7 @@ public abstract class CoprocessorHost { * @exception java.io.IOException Exception */ @Override - public HTableInterface getTable(TableName tableName) throws IOException { + public Table getTable(TableName tableName) throws IOException { return this.getTable(tableName, HTable.getDefaultExecutor(getConfiguration())); } @@ -493,7 +504,7 @@ public abstract class CoprocessorHost { * @exception java.io.IOException Exception */ @Override - public HTableInterface getTable(TableName tableName, ExecutorService pool) throws IOException { + public Table getTable(TableName tableName, ExecutorService pool) throws IOException { return HTableWrapper.createWrapper(openTables, tableName, this, pool); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java index ccb16bf0148..a577748fc69 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java @@ -26,14 +26,14 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerServices; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface RegionCoprocessorEnvironment extends CoprocessorEnvironment { /** @return the region associated with this coprocessor */ - HRegion getRegion(); + Region getRegion(); /** @return region information for the region this coprocessor is running on */ HRegionInfo getRegionInfo(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index 9fede5258a5..7ee5a99451c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -42,8 +42,8 @@ import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.regionserver.DeleteTracker; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.HRegion.Operation; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.KeyValueScanner; import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; @@ -368,8 +368,8 @@ public interface RegionObserver extends Coprocessor { * @deprecated Use postCompleteSplit() instead */ @Deprecated - 
void postSplit(final ObserverContext c, final HRegion l, - final HRegion r) throws IOException; + void postSplit(final ObserverContext c, final Region l, + final Region r) throws IOException; /** * This will be called before PONR step as part of split transaction. Calling @@ -617,7 +617,7 @@ public interface RegionObserver extends Coprocessor { * called after acquiring the locks on the mutating rows and after applying the proper timestamp * for each Mutation at the server. The batch may contain Put/Delete. By setting OperationStatus * of Mutations ({@link MiniBatchOperationInProgress#setOperationStatus(int, OperationStatus)}), - * {@link RegionObserver} can make HRegion to skip these Mutations. + * {@link RegionObserver} can make Region to skip these Mutations. * @param c the environment provided by the region server * @param miniBatchOp batch of Mutations getting applied to region. * @throws IOException if an error occurred on the coprocessor @@ -637,7 +637,7 @@ public interface RegionObserver extends Coprocessor { /** * This will be called for region operations where read lock is acquired in - * {@link HRegion#startRegionOperation()}. + * {@link Region#startRegionOperation()}. * @param ctx * @param operation The operation is about to be taken on the region * @throws IOException @@ -646,7 +646,7 @@ public interface RegionObserver extends Coprocessor { Operation operation) throws IOException; /** - * Called after releasing read lock in {@link HRegion#closeRegionOperation(Operation)}. + * Called after releasing read lock in {@link Region#closeRegionOperation()}. * @param ctx * @param operation * @throws IOException diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java index 316bad50881..b1b94ff918e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionServerObserver.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.MetaMutationAnnotation; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; public interface RegionServerObserver extends Coprocessor { @@ -50,7 +50,7 @@ public interface RegionServerObserver extends Coprocessor { * @throws IOException */ void preMerge(final ObserverContext ctx, - final HRegion regionA, final HRegion regionB) throws IOException; + final Region regionA, final Region regionB) throws IOException; /** * called after the regions merge. @@ -61,7 +61,7 @@ public interface RegionServerObserver extends Coprocessor { * @throws IOException */ void postMerge(final ObserverContext c, - final HRegion regionA, final HRegion regionB, final HRegion mergedRegion) throws IOException; + final Region regionA, final Region regionB, final Region mergedRegion) throws IOException; /** * This will be called before PONR step as part of regions merge transaction. 
Calling @@ -74,7 +74,7 @@ public interface RegionServerObserver extends Coprocessor { * @throws IOException */ void preMergeCommit(final ObserverContext ctx, - final HRegion regionA, final HRegion regionB, + final Region regionA, final Region regionB, @MetaMutationAnnotation List metaEntries) throws IOException; /** @@ -86,7 +86,7 @@ public interface RegionServerObserver extends Coprocessor { * @throws IOException */ void postMergeCommit(final ObserverContext ctx, - final HRegion regionA, final HRegion regionB, final HRegion mergedRegion) throws IOException; + final Region regionA, final Region regionB, final Region mergedRegion) throws IOException; /** * This will be called before the roll back of the regions merge. @@ -96,7 +96,7 @@ public interface RegionServerObserver extends Coprocessor { * @throws IOException */ void preRollBackMerge(final ObserverContext ctx, - final HRegion regionA, final HRegion regionB) throws IOException; + final Region regionA, final Region regionB) throws IOException; /** * This will be called after the roll back of the regions merge. @@ -106,7 +106,7 @@ public interface RegionServerObserver extends Coprocessor { * @throws IOException */ void postRollBackMerge(final ObserverContext ctx, - final HRegion regionA, final HRegion regionB) throws IOException; + final Region regionA, final Region regionB) throws IOException; /** * This will be called before executing user request to roll a region server WAL. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java index 7d9692058df..1c5a593b00a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java @@ -19,8 +19,8 @@ package org.apache.hadoop.hbase.io; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; - import java.io.IOException; import java.io.InputStream; import java.io.FileNotFoundException; @@ -336,6 +336,7 @@ public class FileLink { return locations; } + @Override public String toString() { StringBuilder str = new StringBuilder(getClass().getName()); str.append(" locations=["); @@ -472,5 +473,25 @@ public class FileLink { if (dirPath == null) return false; return dirPath.getName().startsWith(BACK_REFERENCES_DIRECTORY_PREFIX); } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + // Assumes that the ordering of locations between objects are the same. This is true for the + // current subclasses already (HFileLink, WALLink). 
Otherwise, we may have to sort the locations + // or keep them presorted + if (this.getClass().equals(obj.getClass())) { + return Arrays.equals(this.locations, ((FileLink) obj).locations); + } + + return false; + } + + @Override + public int hashCode() { + return Arrays.hashCode(locations); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java index 05c996f5052..58502bb6323 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java @@ -173,17 +173,6 @@ public class HalfStoreFileReader extends StoreFile.Reader { return true; } - @Override - public boolean seekBefore(byte[] key) throws IOException { - return seekBefore(key, 0, key.length); - } - - @Override - public boolean seekBefore(byte [] key, int offset, int length) - throws IOException { - return seekBefore(new KeyValue.KeyOnlyKeyValue(key, offset, length)); - } - @Override public boolean seekTo() throws IOException { if (top) { @@ -212,29 +201,6 @@ public class HalfStoreFileReader extends StoreFile.Reader { splitkey, 0, splitkey.length) < 0; } - @Override - public int seekTo(byte[] key) throws IOException { - return seekTo(key, 0, key.length); - } - - @Override - public int seekTo(byte[] key, int offset, int length) throws IOException { - return seekTo(new KeyValue.KeyOnlyKeyValue(key, offset, length)); - } - - @Override - public int reseekTo(byte[] key) throws IOException { - return reseekTo(key, 0, key.length); - } - - @Override - public int reseekTo(byte[] key, int offset, int length) - throws IOException { - //This function is identical to the corresponding seekTo function except - //that we call reseekTo (and not seekTo) on the delegate. - return reseekTo(new KeyValue.KeyOnlyKeyValue(key, offset, length)); - } - public org.apache.hadoop.hbase.io.hfile.HFile.Reader getReader() { return this.delegate.getReader(); } @@ -317,6 +283,11 @@ public class HalfStoreFileReader extends StoreFile.Reader { } return ret; } + + @Override + public Cell getNextIndexedKey() { + return null; + } }; } @@ -333,7 +304,7 @@ public class HalfStoreFileReader extends StoreFile.Reader { // Get a scanner that caches the block and that uses pread. HFileScanner scanner = getScanner(true, true); try { - if (scanner.seekBefore(this.splitkey)) { + if (scanner.seekBefore(this.splitCell)) { return Bytes.toBytes(scanner.getKey()); } } catch (IOException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java deleted file mode 100644 index 8c1e7b95651..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileReader.java +++ /dev/null @@ -1,352 +0,0 @@ -/* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.io.hfile; - -import java.io.IOException; -import java.nio.ByteBuffer; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; -import org.apache.hadoop.hbase.fs.HFileSystem; -import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; - -/** - * Common functionality needed by all versions of {@link HFile} readers. - */ -@InterfaceAudience.Private -@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") -public abstract class AbstractHFileReader - implements HFile.Reader, Configurable { - /** Stream to read from. Does checksum verifications in file system */ - protected FSDataInputStream istream; // UUF_UNUSED_PUBLIC_OR_PROTECTED_FIELD - - /** The file system stream of the underlying {@link HFile} that - * does not do checksum verification in the file system */ - protected FSDataInputStream istreamNoFsChecksum; // UUF_UNUSED_PUBLIC_OR_PROTECTED_FIELD - - /** Data block index reader keeping the root data index in memory */ - protected HFileBlockIndex.BlockIndexReader dataBlockIndexReader; - - /** Meta block index reader -- always single level */ - protected HFileBlockIndex.BlockIndexReader metaBlockIndexReader; - - protected final FixedFileTrailer trailer; - - /** Filled when we read in the trailer. */ - protected final Compression.Algorithm compressAlgo; - - /** - * What kind of data block encoding should be used while reading, writing, - * and handling cache. - */ - protected HFileDataBlockEncoder dataBlockEncoder = - NoOpDataBlockEncoder.INSTANCE; - - /** Last key in the file. Filled in when we read in the file info */ - protected byte [] lastKey = null; - - /** Average key length read from file info */ - protected int avgKeyLen = -1; - - /** Average value length read from file info */ - protected int avgValueLen = -1; - - /** Key comparator */ - protected KVComparator comparator = new KVComparator(); - - /** Size of this file. */ - protected final long fileSize; - - /** Block cache configuration. 
*/ - protected final CacheConfig cacheConf; - - /** Path of file */ - protected final Path path; - - /** File name to be used for block names */ - protected final String name; - - protected FileInfo fileInfo; - - /** The filesystem used for accesing data */ - protected HFileSystem hfs; - - protected Configuration conf; - - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") - protected AbstractHFileReader(Path path, FixedFileTrailer trailer, - final long fileSize, final CacheConfig cacheConf, final HFileSystem hfs, - final Configuration conf) { - this.trailer = trailer; - this.compressAlgo = trailer.getCompressionCodec(); - this.cacheConf = cacheConf; - this.fileSize = fileSize; - this.path = path; - this.name = path.getName(); - this.hfs = hfs; // URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD - this.conf = conf; - } - - @SuppressWarnings("serial") - public static class BlockIndexNotLoadedException - extends IllegalStateException { - public BlockIndexNotLoadedException() { - // Add a message in case anyone relies on it as opposed to class name. - super("Block index not loaded"); - } - } - - protected String toStringFirstKey() { - return KeyValue.keyToString(getFirstKey()); - } - - protected String toStringLastKey() { - return KeyValue.keyToString(getLastKey()); - } - - public abstract boolean isFileInfoLoaded(); - - @Override - public String toString() { - return "reader=" + path.toString() + - (!isFileInfoLoaded()? "": - ", compression=" + compressAlgo.getName() + - ", cacheConf=" + cacheConf + - ", firstKey=" + toStringFirstKey() + - ", lastKey=" + toStringLastKey()) + - ", avgKeyLen=" + avgKeyLen + - ", avgValueLen=" + avgValueLen + - ", entries=" + trailer.getEntryCount() + - ", length=" + fileSize; - } - - @Override - public long length() { - return fileSize; - } - - /** - * Create a Scanner on this file. No seeks or reads are done on creation. Call - * {@link HFileScanner#seekTo(byte[])} to position an start the read. There is - * nothing to clean up in a Scanner. Letting go of your references to the - * scanner is sufficient. NOTE: Do not use this overload of getScanner for - * compactions. - * - * @param cacheBlocks True if we should cache blocks read in by this scanner. - * @param pread Use positional read rather than seek+read if true (pread is - * better for random reads, seek+read is better scanning). - * @return Scanner on this file. - */ - @Override - public HFileScanner getScanner(boolean cacheBlocks, final boolean pread) { - return getScanner(cacheBlocks, pread, false); - } - - /** - * @return the first key in the file. May be null if file has no entries. Note - * that this is not the first row key, but rather the byte form of the - * first KeyValue. - */ - @Override - public byte [] getFirstKey() { - if (dataBlockIndexReader == null) { - throw new BlockIndexNotLoadedException(); - } - return dataBlockIndexReader.isEmpty() ? null - : dataBlockIndexReader.getRootBlockKey(0); - } - - /** - * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's - * patch goes in to eliminate {@link KeyValue} here. - * - * @return the first row key, or null if the file is empty. - */ - @Override - public byte[] getFirstRowKey() { - byte[] firstKey = getFirstKey(); - if (firstKey == null) - return null; - return KeyValue.createKeyValueFromKey(firstKey).getRow(); - } - - /** - * TODO left from {@link HFile} version 1: move this to StoreFile after - * Ryan's patch goes in to eliminate {@link KeyValue} here. 
- * - * @return the last row key, or null if the file is empty. - */ - @Override - public byte[] getLastRowKey() { - byte[] lastKey = getLastKey(); - if (lastKey == null) - return null; - return KeyValue.createKeyValueFromKey(lastKey).getRow(); - } - - /** @return number of KV entries in this HFile */ - @Override - public long getEntries() { - return trailer.getEntryCount(); - } - - /** @return comparator */ - @Override - public KVComparator getComparator() { - return comparator; - } - - /** @return compression algorithm */ - @Override - public Compression.Algorithm getCompressionAlgorithm() { - return compressAlgo; - } - - /** - * @return the total heap size of data and meta block indexes in bytes. Does - * not take into account non-root blocks of a multilevel data index. - */ - public long indexSize() { - return (dataBlockIndexReader != null ? dataBlockIndexReader.heapSize() : 0) - + ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize() - : 0); - } - - @Override - public String getName() { - return name; - } - - @Override - public HFileBlockIndex.BlockIndexReader getDataBlockIndexReader() { - return dataBlockIndexReader; - } - - @Override - public FixedFileTrailer getTrailer() { - return trailer; - } - - @Override - public FileInfo loadFileInfo() throws IOException { - return fileInfo; - } - - /** - * An exception thrown when an operation requiring a scanner to be seeked - * is invoked on a scanner that is not seeked. - */ - @SuppressWarnings("serial") - public static class NotSeekedException extends IllegalStateException { - public NotSeekedException() { - super("Not seeked to a key/value"); - } - } - - protected static abstract class Scanner implements HFileScanner { - protected ByteBuffer blockBuffer; - - protected boolean cacheBlocks; - protected final boolean pread; - protected final boolean isCompaction; - - protected int currKeyLen; - protected int currValueLen; - protected int currMemstoreTSLen; - protected long currMemstoreTS; - - protected int blockFetches; - - protected final HFile.Reader reader; - - public Scanner(final HFile.Reader reader, final boolean cacheBlocks, - final boolean pread, final boolean isCompaction) { - this.reader = reader; - this.cacheBlocks = cacheBlocks; - this.pread = pread; - this.isCompaction = isCompaction; - } - - @Override - public boolean isSeeked(){ - return blockBuffer != null; - } - - @Override - public String toString() { - return "HFileScanner for reader " + String.valueOf(getReader()); - } - - protected void assertSeeked() { - if (!isSeeked()) - throw new NotSeekedException(); - } - - @Override - public int seekTo(byte[] key) throws IOException { - return seekTo(key, 0, key.length); - } - - @Override - public boolean seekBefore(byte[] key) throws IOException { - return seekBefore(key, 0, key.length); - } - - @Override - public int reseekTo(byte[] key) throws IOException { - return reseekTo(key, 0, key.length); - } - - @Override - public HFile.Reader getReader() { - return reader; - } - } - - /** For testing */ - abstract HFileBlock.FSReader getUncachedBlockReader(); - - public Path getPath() { - return path; - } - - @Override - public DataBlockEncoding getDataBlockEncoding() { - return dataBlockEncoder.getDataBlockEncoding(); - } - - public abstract int getMajorVersion(); - - @Override - public Configuration getConf() { - return conf; - } - - @Override - public void setConf(Configuration conf) { - this.conf = conf; - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java deleted file mode 100644 index 52491e6b7cc..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java +++ /dev/null @@ -1,266 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.io.hfile; - -import java.io.DataOutputStream; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValue.KVComparator; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.io.Writable; - -/** - * Common functionality needed by all versions of {@link HFile} writers. - */ -@InterfaceAudience.Private -public abstract class AbstractHFileWriter implements HFile.Writer { - - /** The Cell previously appended. Becomes the last cell in the file.*/ - protected Cell lastCell = null; - - /** FileSystem stream to write into. */ - protected FSDataOutputStream outputStream; - - /** True if we opened the outputStream (and so will close it). */ - protected final boolean closeOutputStream; - - /** A "file info" block: a key-value map of file-wide metadata. */ - protected FileInfo fileInfo = new HFile.FileInfo(); - - /** Total # of key/value entries, i.e. how many times add() was called. */ - protected long entryCount = 0; - - /** Used for calculating the average key length. */ - protected long totalKeyLength = 0; - - /** Used for calculating the average value length. */ - protected long totalValueLength = 0; - - /** Total uncompressed bytes, maybe calculate a compression ratio later. */ - protected long totalUncompressedBytes = 0; - - /** Key comparator. Used to ensure we write in order. */ - protected final KVComparator comparator; - - /** Meta block names. */ - protected List metaNames = new ArrayList(); - - /** {@link Writable}s representing meta block data. */ - protected List metaData = new ArrayList(); - - /** - * First cell in a block. - * This reference should be short-lived since we write hfiles in a burst. 
- */ - protected Cell firstCellInBlock = null; - - /** May be null if we were passed a stream. */ - protected final Path path; - - - /** Cache configuration for caching data on write. */ - protected final CacheConfig cacheConf; - - /** - * Name for this object used when logging or in toString. Is either - * the result of a toString on stream or else name of passed file Path. - */ - protected final String name; - - /** - * The data block encoding which will be used. - * {@link NoOpDataBlockEncoder#INSTANCE} if there is no encoding. - */ - protected final HFileDataBlockEncoder blockEncoder; - - protected final HFileContext hFileContext; - - public AbstractHFileWriter(CacheConfig cacheConf, - FSDataOutputStream outputStream, Path path, - KVComparator comparator, HFileContext fileContext) { - this.outputStream = outputStream; - this.path = path; - this.name = path != null ? path.getName() : outputStream.toString(); - this.hFileContext = fileContext; - DataBlockEncoding encoding = hFileContext.getDataBlockEncoding(); - if (encoding != DataBlockEncoding.NONE) { - this.blockEncoder = new HFileDataBlockEncoderImpl(encoding); - } else { - this.blockEncoder = NoOpDataBlockEncoder.INSTANCE; - } - this.comparator = comparator != null ? comparator - : KeyValue.COMPARATOR; - - closeOutputStream = path != null; - this.cacheConf = cacheConf; - } - - /** - * Add last bits of metadata to file info before it is written out. - */ - protected void finishFileInfo() throws IOException { - if (lastCell != null) { - // Make a copy. The copy is stuffed into our fileinfo map. Needs a clean - // byte buffer. Won't take a tuple. - byte [] lastKey = CellUtil.getCellKeySerializedAsKeyValueKey(this.lastCell); - fileInfo.append(FileInfo.LASTKEY, lastKey, false); - } - - // Average key length. - int avgKeyLen = - entryCount == 0 ? 0 : (int) (totalKeyLength / entryCount); - fileInfo.append(FileInfo.AVG_KEY_LEN, Bytes.toBytes(avgKeyLen), false); - - // Average value length. - int avgValueLen = - entryCount == 0 ? 0 : (int) (totalValueLength / entryCount); - fileInfo.append(FileInfo.AVG_VALUE_LEN, Bytes.toBytes(avgValueLen), false); - - fileInfo.append(FileInfo.CREATE_TIME_TS, Bytes.toBytes(hFileContext.getFileCreateTime()), - false); - } - - /** - * Add to the file info. All added key/value pairs can be obtained using - * {@link HFile.Reader#loadFileInfo()}. - * - * @param k Key - * @param v Value - * @throws IOException in case the key or the value are invalid - */ - @Override - public void appendFileInfo(final byte[] k, final byte[] v) - throws IOException { - fileInfo.append(k, v, true); - } - - /** - * Sets the file info offset in the trailer, finishes up populating fields in - * the file info, and writes the file info into the given data output. The - * reason the data output is not always {@link #outputStream} is that we store - * file info as a block in version 2. - * - * @param trailer fixed file trailer - * @param out the data output to write the file info to - * @throws IOException - */ - protected final void writeFileInfo(FixedFileTrailer trailer, DataOutputStream out) - throws IOException { - trailer.setFileInfoOffset(outputStream.getPos()); - finishFileInfo(); - fileInfo.write(out); - } - - /** - * Checks that the given Cell's key does not violate the key order. - * - * @param cell Cell whose key to check. 
- * @return true if the key is duplicate - * @throws IOException if the key or the key order is wrong - */ - protected boolean checkKey(final Cell cell) throws IOException { - boolean isDuplicateKey = false; - - if (cell == null) { - throw new IOException("Key cannot be null or empty"); - } - if (lastCell != null) { - int keyComp = comparator.compareOnlyKeyPortion(lastCell, cell); - - if (keyComp > 0) { - throw new IOException("Added a key not lexically larger than" - + " previous. Current cell = " + cell + ", lastCell = " + lastCell); - } else if (keyComp == 0) { - isDuplicateKey = true; - } - } - return isDuplicateKey; - } - - /** Checks the given value for validity. */ - protected void checkValue(final byte[] value, final int offset, - final int length) throws IOException { - if (value == null) { - throw new IOException("Value cannot be null"); - } - } - - /** - * @return Path or null if we were passed a stream rather than a Path. - */ - @Override - public Path getPath() { - return path; - } - - @Override - public String toString() { - return "writer=" + (path != null ? path.toString() : null) + ", name=" - + name + ", compression=" + hFileContext.getCompression().getName(); - } - - /** - * Sets remaining trailer fields, writes the trailer to disk, and optionally - * closes the output stream. - */ - protected void finishClose(FixedFileTrailer trailer) throws IOException { - trailer.setMetaIndexCount(metaNames.size()); - trailer.setTotalUncompressedBytes(totalUncompressedBytes+ trailer.getTrailerSize()); - trailer.setEntryCount(entryCount); - trailer.setCompressionCodec(hFileContext.getCompression()); - - trailer.serialize(outputStream); - - if (closeOutputStream) { - outputStream.close(); - outputStream = null; - } - } - - public static Compression.Algorithm compressionByName(String algoName) { - if (algoName == null) - return HFile.DEFAULT_COMPRESSION_ALGORITHM; - return Compression.getCompressionAlgorithmByName(algoName); - } - - /** A helper method to create HFile output streams in constructors */ - protected static FSDataOutputStream createOutputStream(Configuration conf, - FileSystem fs, Path path, InetSocketAddress[] favoredNodes) throws IOException { - FsPermission perms = FSUtils.getFilePermissions(fs, conf, - HConstants.DATA_FILE_UMASK_KEY); - return FSUtils.create(fs, path, perms, favoredNodes); - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java index ceb05e35936..4a5bb64103c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockWithScanInfo.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hbase.io.hfile; +import org.apache.hadoop.hbase.Cell; + /** * BlockWithScanInfo is wrapper class for HFileBlock with other attributes. These attributes are * supposed to be much cheaper to be maintained in each caller thread than in HFileBlock itself. @@ -27,9 +29,9 @@ public class BlockWithScanInfo { * The first key in the next block following this one in the HFile. 
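checkKey() above is what enforces the append-in-order contract for writers. A minimal sketch of the writer side, assuming the HFile.WriterFactory API referenced elsewhere in this patch and a hypothetical output path:

    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path out = new Path("/tmp/example.hfile");                   // hypothetical path
    HFileContext ctx = new HFileContextBuilder().withBlockSize(64 * 1024).build();
    HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
        .withPath(fs, out)
        .withFileContext(ctx)
        .create();
    try {
      writer.append(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
          Bytes.toBytes("q"), Bytes.toBytes("v1")));
      writer.append(new KeyValue(Bytes.toBytes("row2"), Bytes.toBytes("cf"),
          Bytes.toBytes("q"), Bytes.toBytes("v2")));
      // appending "row0" at this point would make checkKey() throw: cells must arrive in comparator order
    } finally {
      writer.close();
    }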
* If this key is unknown, this is reference-equal with HConstants.NO_NEXT_INDEXED_KEY */ - private final byte[] nextIndexedKey; + private final Cell nextIndexedKey; - public BlockWithScanInfo(HFileBlock hFileBlock, byte[] nextIndexedKey) { + public BlockWithScanInfo(HFileBlock hFileBlock, Cell nextIndexedKey) { this.hFileBlock = hFileBlock; this.nextIndexedKey = nextIndexedKey; } @@ -38,7 +40,7 @@ public class BlockWithScanInfo { return hFileBlock; } - public byte[] getNextIndexedKey() { + public Cell getNextIndexedKey() { return nextIndexedKey; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index f212f14f45c..5d221c8efb8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; +import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import com.google.common.annotations.VisibleForTesting; @@ -126,8 +127,25 @@ public class CacheConfig { */ public static final String BLOCKCACHE_BLOCKSIZE_KEY = "hbase.offheapcache.minblocksize"; - // Defaults + private static final String EXTERNAL_BLOCKCACHE_KEY = "hbase.blockcache.use.external"; + private static final boolean EXTERNAL_BLOCKCACHE_DEFAULT = false; + private static final String EXTERNAL_BLOCKCACHE_CLASS_KEY="hbase.blockcache.external.class"; + + /** + * Enum of all built in external block caches. + * This is used for config. + */ + private static enum ExternalBlockCaches { + memcached(MemcachedBlockCache.class); + // TODO(eclark): Consider more. Redis, etc. + Class clazz; + ExternalBlockCaches(Class clazz) { + this.clazz = clazz; + } + } + + // Defaults public static final boolean DEFAULT_CACHE_DATA_ON_READ = true; public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false; public static final boolean DEFAULT_IN_MEMORY = false; @@ -478,7 +496,44 @@ public class CacheConfig { * @return Returns L2 block cache instance (for now it is BucketCache BlockCache all the time) * or null if not supposed to be a L2. */ - private static BucketCache getL2(final Configuration c, final MemoryUsage mu) { + private static BlockCache getL2(final Configuration c, final MemoryUsage mu) { + final boolean useExternal = c.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT); + if (LOG.isDebugEnabled()) { + LOG.debug("Trying to use " + (useExternal?" External":" Internal") + " l2 cache"); + } + + // If we want to use an external block cache then create that. + if (useExternal) { + return getExternalBlockcache(c); + } + + // otherwise use the bucket cache. + return getBucketCache(c, mu); + + } + + private static BlockCache getExternalBlockcache(Configuration c) { + Class klass = null; + + // Get the class, from the config. s + try { + klass = ExternalBlockCaches.valueOf(c.get(EXTERNAL_BLOCKCACHE_CLASS_KEY, "memcache")).clazz; + } catch (IllegalArgumentException exception) { + klass = c.getClass(EXTERNAL_BLOCKCACHE_CLASS_KEY, MemcachedBlockCache.class); + } + + // Now try and create an instance of the block cache. 
+ try { + LOG.info("Creating external block cache of type: " + klass); + return (BlockCache) ReflectionUtils.newInstance(klass, c); + } catch (Exception e) { + LOG.warn("Error creating external block cache", e); + } + return null; + + } + + private static BlockCache getBucketCache(Configuration c, MemoryUsage mu) { // Check for L2. ioengine name must be non-null. String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY, null); if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0) return null; @@ -533,22 +588,27 @@ public class CacheConfig { LruBlockCache l1 = getL1(conf, mu); // blockCacheDisabled is set as a side-effect of getL1(), so check it again after the call. if (blockCacheDisabled) return null; - BucketCache l2 = getL2(conf, mu); + BlockCache l2 = getL2(conf, mu); if (l2 == null) { GLOBAL_BLOCK_CACHE_INSTANCE = l1; } else { + boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT); boolean combinedWithLru = conf.getBoolean(BUCKET_CACHE_COMBINED_KEY, DEFAULT_BUCKET_CACHE_COMBINED); - if (combinedWithLru) { - GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(l1, l2); + if (useExternal) { + GLOBAL_BLOCK_CACHE_INSTANCE = new InclusiveCombinedBlockCache(l1, l2); } else { - // L1 and L2 are not 'combined'. They are connected via the LruBlockCache victimhandler - // mechanism. It is a little ugly but works according to the following: when the - // background eviction thread runs, blocks evicted from L1 will go to L2 AND when we get - // a block from the L1 cache, if not in L1, we will search L2. - l1.setVictimCache(l2); - GLOBAL_BLOCK_CACHE_INSTANCE = l1; + if (combinedWithLru) { + GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(l1, l2); + } else { + // L1 and L2 are not 'combined'. They are connected via the LruBlockCache victimhandler + // mechanism. It is a little ugly but works according to the following: when the + // background eviction thread runs, blocks evicted from L1 will go to L2 AND when we get + // a block from the L1 cache, if not in L1, we will search L2. + GLOBAL_BLOCK_CACHE_INSTANCE = l1; + } } + l1.setVictimCache(l2); } return GLOBAL_BLOCK_CACHE_INSTANCE; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index 52a5793a451..7725cf9290b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -25,32 +25,37 @@ import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; + /** * CombinedBlockCache is an abstraction layer that combines * {@link LruBlockCache} and {@link BucketCache}. The smaller lruCache is used - * to cache bloom blocks and index blocks. The larger bucketCache is used to + * to cache bloom blocks and index blocks. The larger l2Cache is used to * cache data blocks. {@link #getBlock(BlockCacheKey, boolean, boolean, boolean)} reads - * first from the smaller lruCache before looking for the block in the bucketCache. Blocks evicted + * first from the smaller lruCache before looking for the block in the l2Cache. Blocks evicted * from lruCache are put into the bucket cache. * Metrics are the combined size and hits and misses of both caches. 
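The CacheConfig changes above make the L2 cache a config-driven choice between the bucket cache and an external block cache. A minimal sketch of how an operator or test might turn the external (memcached-backed) L2 on, using the keys introduced in this patch (values are illustrative):

    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.blockcache.use.external", true);      // EXTERNAL_BLOCKCACHE_KEY
    conf.set("hbase.blockcache.external.class", "memcached");    // enum name, or a fully qualified BlockCache class
    // With these set, CacheConfig.getL2() returns the external cache and instantiateBlockCache()
    // wires it behind the LruBlockCache via InclusiveCombinedBlockCache instead of CombinedBlockCache.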
* */ @InterfaceAudience.Private public class CombinedBlockCache implements ResizableBlockCache, HeapSize { - private final LruBlockCache lruCache; - private final BucketCache bucketCache; - private final CombinedCacheStats combinedCacheStats; + protected final LruBlockCache lruCache; + protected final BlockCache l2Cache; + protected final CombinedCacheStats combinedCacheStats; - public CombinedBlockCache(LruBlockCache lruCache, BucketCache bucketCache) { + public CombinedBlockCache(LruBlockCache lruCache, BlockCache l2Cache) { this.lruCache = lruCache; - this.bucketCache = bucketCache; + this.l2Cache = l2Cache; this.combinedCacheStats = new CombinedCacheStats(lruCache.getStats(), - bucketCache.getStats()); + l2Cache.getStats()); } @Override public long heapSize() { - return lruCache.heapSize() + bucketCache.heapSize(); + long l2size = 0; + if (l2Cache instanceof HeapSize) { + l2size = ((HeapSize) l2Cache).heapSize(); + } + return lruCache.heapSize() + l2size; } @Override @@ -60,7 +65,7 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { if (isMetaBlock || cacheDataInL1) { lruCache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1); } else { - bucketCache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1); + l2Cache.cacheBlock(cacheKey, buf, inMemory, false); } } @@ -73,22 +78,24 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics) { // TODO: is there a hole here, or just awkwardness since in the lruCache getBlock - // we end up calling bucketCache.getBlock. + // we end up calling l2Cache.getBlock. if (lruCache.containsBlock(cacheKey)) { return lruCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); } - return bucketCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); + Cacheable result = l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); + + return result; } @Override public boolean evictBlock(BlockCacheKey cacheKey) { - return lruCache.evictBlock(cacheKey) || bucketCache.evictBlock(cacheKey); + return lruCache.evictBlock(cacheKey) || l2Cache.evictBlock(cacheKey); } @Override public int evictBlocksByHfileName(String hfileName) { return lruCache.evictBlocksByHfileName(hfileName) - + bucketCache.evictBlocksByHfileName(hfileName); + + l2Cache.evictBlocksByHfileName(hfileName); } @Override @@ -99,27 +106,27 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public void shutdown() { lruCache.shutdown(); - bucketCache.shutdown(); + l2Cache.shutdown(); } @Override public long size() { - return lruCache.size() + bucketCache.size(); + return lruCache.size() + l2Cache.size(); } @Override public long getFreeSize() { - return lruCache.getFreeSize() + bucketCache.getFreeSize(); + return lruCache.getFreeSize() + l2Cache.getFreeSize(); } @Override public long getCurrentSize() { - return lruCache.getCurrentSize() + bucketCache.getCurrentSize(); + return lruCache.getCurrentSize() + l2Cache.getCurrentSize(); } @Override public long getBlockCount() { - return lruCache.getBlockCount() + bucketCache.getBlockCount(); + return lruCache.getBlockCount() + l2Cache.getBlockCount(); } private static class CombinedCacheStats extends CacheStats { @@ -205,7 +212,7 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize { @Override public BlockCache[] getBlockCaches() { - return new BlockCache [] {this.lruCache, this.bucketCache}; + return new BlockCache [] {this.lruCache, 
this.l2Cache}; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java index 56510f06c5a..3dcfc9b67a8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java @@ -238,7 +238,7 @@ public class FixedFileTrailer { BlockType.TRAILER.readAndCheck(inputStream); if (majorVersion > 2 - || (majorVersion == 2 && minorVersion >= HFileReaderV2.PBUF_TRAILER_MINOR_VERSION)) { + || (majorVersion == 2 && minorVersion >= HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION)) { deserializeFromPB(inputStream); } else { deserializeFromWritable(inputStream); @@ -611,7 +611,9 @@ public class FixedFileTrailer { } public byte[] getEncryptionKey() { - expectAtLeastMajorVersion(3); + // This is a v3 feature but if reading a v2 file the encryptionKey will just be null which + // if fine for this feature. + expectAtLeastMajorVersion(2); return encryptionKey; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index ad62d7186f3..09233a2d3d0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.io.Writable; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; /** @@ -197,6 +198,8 @@ public class HFile { /** API required to write an {@link HFile} */ public interface Writer extends Closeable { + /** Max memstore (mvcc) timestamp in FileInfo */ + public static final byte [] MAX_MEMSTORE_TS_KEY = Bytes.toBytes("MAX_MEMSTORE_TS_KEY"); /** Add an element to the file info map. */ void appendFileInfo(byte[] key, byte[] value) throws IOException; @@ -294,7 +297,7 @@ public class HFile { "filesystem/path or path"); } if (path != null) { - ostream = AbstractHFileWriter.createOutputStream(conf, fs, path, favoredNodes); + ostream = HFileWriterImpl.createOutputStream(conf, fs, path, favoredNodes); } return createWriter(fs, path, ostream, comparator, fileContext); @@ -333,9 +336,12 @@ public class HFile { int version = getFormatVersion(conf); switch (version) { case 2: - return new HFileWriterV2.WriterFactoryV2(conf, cacheConf); + throw new IllegalArgumentException("This should never happen. " + + "Did you change hfile.format.version to read v2? 
This version of the software writes v3" + + " hfiles only (but it can read v2 files without having to update hfile.format.version " + + "in hbase-site.xml)"); case 3: - return new HFileWriterV3.WriterFactoryV3(conf, cacheConf); + return new HFileWriterFactory(conf, cacheConf); default: throw new IllegalArgumentException("Cannot create writer for HFile " + "format version " + version); @@ -440,6 +446,18 @@ public class HFile { * Return the file context of the HFile this reader belongs to */ HFileContext getFileContext(); + + boolean shouldIncludeMemstoreTS(); + + boolean isDecodeMemstoreTS(); + + DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction); + + @VisibleForTesting + HFileBlock.FSReader getUncachedBlockReader(); + + @VisibleForTesting + boolean prefetchComplete(); } /** @@ -463,9 +481,10 @@ public class HFile { trailer = FixedFileTrailer.readFromStream(fsdis.getStream(isHBaseChecksum), size); switch (trailer.getMajorVersion()) { case 2: - return new HFileReaderV2(path, trailer, fsdis, size, cacheConf, hfs, conf); + LOG.debug("Opening HFile v2 with v3 reader"); + // Fall through. case 3 : - return new HFileReaderV3(path, trailer, fsdis, size, cacheConf, hfs, conf); + return new HFileReaderImpl(path, trailer, fsdis, size, cacheConf, hfs, conf); default: throw new IllegalArgumentException("Invalid HFile version " + trailer.getMajorVersion()); } @@ -489,6 +508,7 @@ public class HFile { * @return A version specific Hfile Reader * @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException */ + @SuppressWarnings("resource") public static Reader createReader(FileSystem fs, Path path, FSDataInputStreamWrapper fsdis, long size, CacheConfig cacheConf, Configuration conf) throws IOException { @@ -532,6 +552,47 @@ public class HFile { return pickReaderVersion(path, wrapper, size, cacheConf, null, conf); } + /** + * Returns true if the specified file has a valid HFile Trailer. + * @param fs filesystem + * @param path Path to file to verify + * @return true if the file has a valid HFile Trailer, otherwise false + * @throws IOException if failed to read from the underlying stream + */ + public static boolean isHFileFormat(final FileSystem fs, final Path path) throws IOException { + return isHFileFormat(fs, fs.getFileStatus(path)); + } + + /** + * Returns true if the specified file has a valid HFile Trailer. + * @param fs filesystem + * @param fileStatus the file to verify + * @return true if the file has a valid HFile Trailer, otherwise false + * @throws IOException if failed to read from the underlying stream + */ + public static boolean isHFileFormat(final FileSystem fs, final FileStatus fileStatus) + throws IOException { + final Path path = fileStatus.getPath(); + final long size = fileStatus.getLen(); + FSDataInputStreamWrapper fsdis = new FSDataInputStreamWrapper(fs, path); + try { + boolean isHBaseChecksum = fsdis.shouldUseHBaseChecksum(); + assert !isHBaseChecksum; // Initially we must read with FS checksum. + FixedFileTrailer.readFromStream(fsdis.getStream(isHBaseChecksum), size); + return true; + } catch (IllegalArgumentException e) { + return false; + } catch (IOException e) { + throw e; + } finally { + try { + fsdis.close(); + } catch (Throwable t) { + LOG.warn("Error closing fsdis FSDataInputStreamWrapper: " + path, t); + } + } + } + /** * Metadata for this file. Conjured by the writer. Read in by the reader. 
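The new isHFileFormat() helper above only checks that a fixed trailer can be read, so it is cheap enough to run over candidate files before handing them to further HFile processing. A minimal sketch, assuming a Configuration named conf as in the earlier sketches and a hypothetical staging path:

    FileSystem fs = FileSystem.get(conf);
    Path candidate = new Path("/staging/cf/part-00000");         // hypothetical file
    if (HFile.isHFileFormat(fs, candidate)) {
      // has a readable HFile trailer; safe to treat as an hfile
    } else {
      // not an hfile (e.g. a stray marker or text file); skip or report it
    }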
*/ @@ -813,6 +874,18 @@ public class HFile { } } + + public static void checkHFileVersion(final Configuration c) { + int version = c.getInt(FORMAT_VERSION_KEY, MAX_FORMAT_VERSION); + if (version < MAX_FORMAT_VERSION || version > MAX_FORMAT_VERSION) { + throw new IllegalArgumentException("The setting for " + FORMAT_VERSION_KEY + + " (in your hbase-*.xml files) is " + version + " which does not match " + + MAX_FORMAT_VERSION + + "; are you running with a configuration from an older or newer hbase install (an " + + "incompatible hbase-default.xml or hbase-site.xml on your CLASSPATH)?"); + } + } + public static void main(String[] args) throws Exception { // delegate to preserve old behavior HFilePrettyPrinter.main(args); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index b096185df5c..a64bb948cdf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -116,7 +116,7 @@ public class HFileBlock implements Cacheable { */ static final int CHECKSUM_SIZE = Bytes.SIZEOF_INT; - private static final CacheableDeserializer blockDeserializer = + static final CacheableDeserializer blockDeserializer = new CacheableDeserializer() { public HFileBlock deserialize(ByteBuffer buf, boolean reuse) throws IOException{ buf.limit(buf.limit() - HFileBlock.EXTRA_SERIALIZATION_SPACE).rewind(); @@ -130,13 +130,13 @@ public class HFileBlock implements Cacheable { buf.position(buf.limit()); buf.limit(buf.limit() + HFileBlock.EXTRA_SERIALIZATION_SPACE); boolean usesChecksum = buf.get() == (byte)1; - HFileBlock ourBuffer = new HFileBlock(newByteBuffer, usesChecksum); - ourBuffer.offset = buf.getLong(); - ourBuffer.nextBlockOnDiskSizeWithHeader = buf.getInt(); - if (ourBuffer.hasNextBlockHeader()) { - ourBuffer.buf.limit(ourBuffer.buf.limit() - ourBuffer.headerSize()); + HFileBlock hFileBlock = new HFileBlock(newByteBuffer, usesChecksum); + hFileBlock.offset = buf.getLong(); + hFileBlock.nextBlockOnDiskSizeWithHeader = buf.getInt(); + if (hFileBlock.hasNextBlockHeader()) { + hFileBlock.buf.limit(hFileBlock.buf.limit() - hFileBlock.headerSize()); } - return ourBuffer; + return hFileBlock; } @Override @@ -670,7 +670,7 @@ public class HFileBlock implements Cacheable { * @return true if succeeded reading the extra bytes * @throws IOException if failed to read the necessary bytes */ - public static boolean readWithExtra(InputStream in, byte buf[], + public static boolean readWithExtra(InputStream in, byte[] buf, int bufOffset, int necessaryLen, int extraLen) throws IOException { int bytesRemaining = necessaryLen + extraLen; while (bytesRemaining > 0) { @@ -776,7 +776,8 @@ public class HFileBlock implements Cacheable { /** * Valid in the READY state. Contains the header and the uncompressed (but * potentially encoded, if this is a data block) bytes, so the length is - * {@link #uncompressedSizeWithoutHeader} + {@link org.apache.hadoop.hbase.HConstants#HFILEBLOCK_HEADER_SIZE}. + * {@link #uncompressedSizeWithoutHeader} + + * {@link org.apache.hadoop.hbase.HConstants#HFILEBLOCK_HEADER_SIZE}. * Does not store checksums. 
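The checkHFileVersion() guard added above accepts only the maximum supported format version in configuration (older versions can still be read, just not written). A minimal sketch of what it enforces, assuming the usual hfile.format.version key and a maximum version of 3 at the time of this patch:

    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hfile.format.version", 3);   // FORMAT_VERSION_KEY; the only value that passes the check
    HFile.checkHFileVersion(conf);            // setting 2 (or 4) here would throw IllegalArgumentException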
*/ private byte[] uncompressedBytesWithHeader; @@ -1059,7 +1060,9 @@ public class HFileBlock implements Cacheable { */ int getOnDiskSizeWithoutHeader() { expectState(State.BLOCK_READY); - return onDiskBytesWithHeader.length + onDiskChecksum.length - HConstants.HFILEBLOCK_HEADER_SIZE; + return onDiskBytesWithHeader.length + + onDiskChecksum.length + - HConstants.HFILEBLOCK_HEADER_SIZE; } /** @@ -1253,13 +1256,40 @@ public class HFileBlock implements Cacheable { /** Get the default decoder for blocks from this file. */ HFileBlockDecodingContext getDefaultBlockDecodingContext(); + + void setIncludesMemstoreTS(boolean includesMemstoreTS); + void setDataBlockEncoder(HFileDataBlockEncoder encoder); } /** - * A common implementation of some methods of {@link FSReader} and some - * tools for implementing HFile format version-specific block readers. + * We always prefetch the header of the next block, so that we know its + * on-disk size in advance and can read it in one operation. */ - private abstract static class AbstractFSReader implements FSReader { + private static class PrefetchedHeader { + long offset = -1; + byte[] header = new byte[HConstants.HFILEBLOCK_HEADER_SIZE]; + final ByteBuffer buf = ByteBuffer.wrap(header, 0, HConstants.HFILEBLOCK_HEADER_SIZE); + } + + /** Reads version 2 blocks from the filesystem. */ + static class FSReaderImpl implements FSReader { + /** The file system stream of the underlying {@link HFile} that + * does or doesn't do checksum validations in the filesystem */ + protected FSDataInputStreamWrapper streamWrapper; + + private HFileBlockDecodingContext encodedBlockDecodingCtx; + + /** Default context used when BlockType != {@link BlockType#ENCODED_DATA}. */ + private final HFileBlockDefaultDecodingContext defaultDecodingCtx; + + private ThreadLocal prefetchedHeaderForThread = + new ThreadLocal() { + @Override + public PrefetchedHeader initialValue() { + return new PrefetchedHeader(); + } + }; + /** Compression algorithm used by the {@link HFile} */ /** The size of the file we are reading from, or -1 if unknown. */ @@ -1281,18 +1311,31 @@ public class HFileBlock implements Cacheable { protected HFileContext fileContext; - public AbstractFSReader(long fileSize, HFileSystem hfs, Path path, HFileContext fileContext) - throws IOException { + public FSReaderImpl(FSDataInputStreamWrapper stream, long fileSize, HFileSystem hfs, Path path, + HFileContext fileContext) throws IOException { this.fileSize = fileSize; this.hfs = hfs; this.path = path; this.fileContext = fileContext; this.hdrSize = headerSize(fileContext.isUseHBaseChecksum()); + + this.streamWrapper = stream; + // Older versions of HBase didn't support checksum. + this.streamWrapper.prepareForBlockReader(!fileContext.isUseHBaseChecksum()); + defaultDecodingCtx = new HFileBlockDefaultDecodingContext(fileContext); + encodedBlockDecodingCtx = defaultDecodingCtx; } - @Override - public BlockIterator blockRange(final long startOffset, - final long endOffset) { + /** + * A constructor that reads files with the latest minor version. + * This is used by unit tests only. 
+ */ + FSReaderImpl(FSDataInputStream istream, long fileSize, HFileContext fileContext) + throws IOException { + this(new FSDataInputStreamWrapper(istream), fileSize, null, null, fileContext); + } + + public BlockIterator blockRange(final long startOffset, final long endOffset) { final FSReader owner = this; // handle for inner class return new BlockIterator() { private long offset = startOffset; @@ -1389,56 +1432,6 @@ public class HFileBlock implements Cacheable { return Bytes.toInt(dest, destOffset + size + BlockType.MAGIC_LENGTH) + hdrSize; } - } - - /** - * We always prefetch the header of the next block, so that we know its - * on-disk size in advance and can read it in one operation. - */ - private static class PrefetchedHeader { - long offset = -1; - byte[] header = new byte[HConstants.HFILEBLOCK_HEADER_SIZE]; - final ByteBuffer buf = ByteBuffer.wrap(header, 0, HConstants.HFILEBLOCK_HEADER_SIZE); - } - - /** Reads version 2 blocks from the filesystem. */ - static class FSReaderImpl extends AbstractFSReader { - /** The file system stream of the underlying {@link HFile} that - * does or doesn't do checksum validations in the filesystem */ - protected FSDataInputStreamWrapper streamWrapper; - - private HFileBlockDecodingContext encodedBlockDecodingCtx; - - /** Default context used when BlockType != {@link BlockType#ENCODED_DATA}. */ - private final HFileBlockDefaultDecodingContext defaultDecodingCtx; - - private ThreadLocal prefetchedHeaderForThread = - new ThreadLocal() { - @Override - public PrefetchedHeader initialValue() { - return new PrefetchedHeader(); - } - }; - - public FSReaderImpl(FSDataInputStreamWrapper stream, long fileSize, HFileSystem hfs, Path path, - HFileContext fileContext) throws IOException { - super(fileSize, hfs, path, fileContext); - this.streamWrapper = stream; - // Older versions of HBase didn't support checksum. - this.streamWrapper.prepareForBlockReader(!fileContext.isUseHBaseChecksum()); - defaultDecodingCtx = new HFileBlockDefaultDecodingContext(fileContext); - encodedBlockDecodingCtx = defaultDecodingCtx; - } - - /** - * A constructor that reads files with the latest minor version. - * This is used by unit tests only. - */ - FSReaderImpl(FSDataInputStream istream, long fileSize, HFileContext fileContext) - throws IOException { - this(new FSDataInputStreamWrapper(istream), fileSize, null, null, fileContext); - } - /** * Reads a version 2 block (version 1 blocks not supported and not expected). Tries to do as * little memory allocation as possible, using the provided on-disk size. 
@@ -1679,11 +1672,11 @@ public class HFileBlock implements Cacheable { return b; } - void setIncludesMemstoreTS(boolean includesMemstoreTS) { + public void setIncludesMemstoreTS(boolean includesMemstoreTS) { this.fileContext.setIncludesMvcc(includesMemstoreTS); } - void setDataBlockEncoder(HFileDataBlockEncoder encoder) { + public void setDataBlockEncoder(HFileDataBlockEncoder encoder) { encodedBlockDecodingCtx = encoder.newDataBlockDecodingContext(this.fileContext); } @@ -1748,6 +1741,19 @@ public class HFileBlock implements Cacheable { return HFileBlock.blockDeserializer; } + @Override + public int hashCode() { + int result = 1; + result = result * 31 + blockType.hashCode(); + result = result * 31 + nextBlockOnDiskSizeWithHeader; + result = result * 31 + (int) (offset ^ (offset >>> 32)); + result = result * 31 + onDiskSizeWithoutHeader; + result = result * 31 + (int) (prevBlockOffset ^ (prevBlockOffset >>> 32)); + result = result * 31 + uncompressedSizeWithoutHeader; + result = result * 31 + buf.hashCode(); + return result; + } + @Override public boolean equals(Object comparison) { if (this == comparison) { @@ -1819,7 +1825,8 @@ public class HFileBlock implements Cacheable { if (!fileContext.isUseHBaseChecksum() || this.fileContext.getBytesPerChecksum() == 0) { return 0; } - return (int)ChecksumUtil.numBytes(onDiskDataSizeWithHeader, this.fileContext.getBytesPerChecksum()); + return (int) ChecksumUtil.numBytes(onDiskDataSizeWithHeader, + this.fileContext.getBytesPerChecksum()); } /** @@ -1873,8 +1880,8 @@ public class HFileBlock implements Cacheable { byte[] magicBuf = new byte[Math.min(buf.limit() - buf.position(), BlockType.MAGIC_LENGTH)]; buf.get(magicBuf); BlockType bt = BlockType.parse(magicBuf, 0, BlockType.MAGIC_LENGTH); - int compressedBlockSizeNoHeader = buf.getInt();; - int uncompressedBlockSizeNoHeader = buf.getInt();; + int compressedBlockSizeNoHeader = buf.getInt(); + int uncompressedBlockSizeNoHeader = buf.getInt(); long prevBlockOffset = buf.getLong(); byte cksumtype = buf.get(); long bytesPerChecksum = buf.getInt(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index 94133640411..5b54807a2da 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -55,8 +55,8 @@ import org.apache.hadoop.util.StringUtils; * * Examples of how to use the block index writer can be found in * {@link org.apache.hadoop.hbase.util.CompoundBloomFilterWriter} and - * {@link HFileWriterV2}. Examples of how to use the reader can be - * found in {@link HFileReaderV2} and TestHFileBlockIndex. + * {@link HFileWriterImpl}. Examples of how to use the reader can be + * found in {@link HFileWriterImpl} and TestHFileBlockIndex. */ @InterfaceAudience.Private public class HFileBlockIndex { @@ -218,14 +218,14 @@ public class HFileBlockIndex { } // the next indexed key - byte[] nextIndexedKey = null; + Cell nextIndexedKey = null; // Read the next-level (intermediate or leaf) index block. 
long currentOffset = blockOffsets[rootLevelIndex]; int currentOnDiskSize = blockDataSizes[rootLevelIndex]; if (rootLevelIndex < blockKeys.length - 1) { - nextIndexedKey = blockKeys[rootLevelIndex + 1]; + nextIndexedKey = new KeyValue.KeyOnlyKeyValue(blockKeys[rootLevelIndex + 1]); } else { nextIndexedKey = HConstants.NO_NEXT_INDEXED_KEY; } @@ -298,7 +298,7 @@ public class HFileBlockIndex { // Only update next indexed key if there is a next indexed key in the current level byte[] tmpNextIndexedKey = getNonRootIndexedKey(buffer, index + 1); if (tmpNextIndexedKey != null) { - nextIndexedKey = tmpNextIndexedKey; + nextIndexedKey = new KeyValue.KeyOnlyKeyValue(tmpNextIndexedKey); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index 7b92df91274..41779a7624e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -264,7 +264,7 @@ public class HFilePrettyPrinter extends Configured implements Tool { if (this.isSeekToRow) { // seek to the first kv on this row shouldScanKeysValues = - (scanner.seekTo(KeyValueUtil.createFirstOnRow(this.row).getKey()) != -1); + (scanner.seekTo(KeyValueUtil.createFirstOnRow(this.row)) != -1); } else { shouldScanKeysValues = scanner.seekTo(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java similarity index 69% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index 250001efe25..1e84e6a0407 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -1,4 +1,5 @@ /* + * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
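Both changes above move callers from raw byte[] keys to Cells: the block index now hands back its next-indexed key wrapped in a KeyValue.KeyOnlyKeyValue, and HFilePrettyPrinter seeks with a Cell instead of serialized key bytes. A minimal sketch of the two idioms, assuming a reader opened as in the earlier sketch:

    // Seeking to the start of a row now passes a Cell, not getKey() bytes:
    HFileScanner scanner = reader.getScanner(true, true);
    int pos = scanner.seekTo(KeyValueUtil.createFirstOnRow(Bytes.toBytes("row-42"))); // hypothetical row
    boolean inRange = (pos != -1);   // -1 means the requested key sorts before the file's first key

    // Wrapping a serialized KeyValue key (as stored in the block index) so it can be compared as a Cell:
    KeyValue kv = new KeyValue(Bytes.toBytes("row-42"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), Bytes.toBytes("v"));
    Cell indexStyleKey = new KeyValue.KeyOnlyKeyValue(kv.getKey());
    int cmp = KeyValue.COMPARATOR.compareOnlyKeyPortion(kv, indexStyleKey);  // 0: same key portion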
See the NOTICE file * distributed with this work for additional information @@ -20,12 +21,15 @@ package org.apache.hadoop.hbase.io.hfile; import java.io.DataInput; import java.io.IOException; import java.nio.ByteBuffer; +import java.security.Key; +import java.security.KeyException; import java.util.ArrayList; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; @@ -35,10 +39,15 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; +import org.apache.hadoop.hbase.io.compress.Compression; +import org.apache.hadoop.hbase.io.crypto.Cipher; +import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.encoding.HFileBlockDecodingContext; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; +import org.apache.hadoop.hbase.security.EncryptionUtil; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.IdLock; @@ -49,32 +58,63 @@ import org.apache.htrace.TraceScope; import com.google.common.annotations.VisibleForTesting; /** - * {@link HFile} reader for version 2. + * Implementation that can handle all hfile versions of {@link HFile.Reader}. */ @InterfaceAudience.Private -public class HFileReaderV2 extends AbstractHFileReader { +@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") +public class HFileReaderImpl implements HFile.Reader, Configurable { + // This class is HFileReaderV3 + HFileReaderV2 + AbstractHFileReader all squashed together into + // one file. Ditto for all the HFileReader.ScannerV? implementations. I was running up against + // the MaxInlineLevel limit because too many tiers involved reading from an hfile. Was also hard + // to navigate the source code when so many classes participating in read. + private static final Log LOG = LogFactory.getLog(HFileReaderImpl.class); - private static final Log LOG = LogFactory.getLog(HFileReaderV2.class); + /** Data block index reader keeping the root data index in memory */ + private HFileBlockIndex.BlockIndexReader dataBlockIndexReader; - /** Minor versions in HFile V2 starting with this number have hbase checksums */ - public static final int MINOR_VERSION_WITH_CHECKSUM = 1; - /** In HFile V2 minor version that does not support checksums */ - public static final int MINOR_VERSION_NO_CHECKSUM = 0; + /** Meta block index reader -- always single level */ + private HFileBlockIndex.BlockIndexReader metaBlockIndexReader; - /** HFile minor version that introduced pbuf filetrailer */ - public static final int PBUF_TRAILER_MINOR_VERSION = 2; + private final FixedFileTrailer trailer; + + /** Filled when we read in the trailer. */ + private final Compression.Algorithm compressAlgo; /** - * The size of a (key length, value length) tuple that prefixes each entry in - * a data block. + * What kind of data block encoding should be used while reading, writing, + * and handling cache. 
*/ - public final static int KEY_VALUE_LEN_SIZE = 2 * Bytes.SIZEOF_INT; + private HFileDataBlockEncoder dataBlockEncoder = NoOpDataBlockEncoder.INSTANCE; - protected boolean includesMemstoreTS = false; - protected boolean decodeMemstoreTS = false; - protected boolean shouldIncludeMemstoreTS() { - return includesMemstoreTS; - } + /** Last key in the file. Filled in when we read in the file info */ + private byte [] lastKey = null; + + /** Average key length read from file info */ + private int avgKeyLen = -1; + + /** Average value length read from file info */ + private int avgValueLen = -1; + + /** Key comparator */ + private KVComparator comparator = new KVComparator(); + + /** Size of this file. */ + private final long fileSize; + + /** Block cache configuration. */ + private final CacheConfig cacheConf; + + /** Path of file */ + private final Path path; + + /** File name to be used for block names */ + private final String name; + + private FileInfo fileInfo; + + private Configuration conf; + + private HFileContext hfileContext; /** Filesystem-level block reader. */ protected HFileBlock.FSReader fsBlockReader; @@ -101,34 +141,48 @@ public class HFileReaderV2 extends AbstractHFileReader { // the file. This version can read Writables version 1. static final int MAX_MINOR_VERSION = 3; + /** + * We can read files whose major version is v2 IFF their minor version is at least 3. + */ + private static final int MIN_V2_MINOR_VERSION_WITH_PB = 3; + /** Minor versions starting with this number have faked index key */ static final int MINOR_VERSION_WITH_FAKED_KEY = 3; - protected HFileContext hfileContext; - /** * Opens a HFile. You must load the index before you can use it by calling * {@link #loadFileInfo()}. - * - * @param path Path to HFile. - * @param trailer File trailer. - * @param fsdis input stream. - * @param size Length of the stream. - * @param cacheConf Cache configuration. + * @param path + * Path to HFile. + * @param trailer + * File trailer. + * @param fsdis + * input stream. + * @param fileSize + * Length of the stream. + * @param cacheConf + * Cache configuration. * @param hfs + * The file system. * @param conf + * Configuration */ - public HFileReaderV2(final Path path, final FixedFileTrailer trailer, - final FSDataInputStreamWrapper fsdis, final long size, final CacheConfig cacheConf, - final HFileSystem hfs, final Configuration conf) throws IOException { - super(path, trailer, size, cacheConf, hfs, conf); + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") + public HFileReaderImpl(final Path path, FixedFileTrailer trailer, + final FSDataInputStreamWrapper fsdis, + final long fileSize, final CacheConfig cacheConf, final HFileSystem hfs, + final Configuration conf) + throws IOException { + this.trailer = trailer; + this.compressAlgo = trailer.getCompressionCodec(); + this.cacheConf = cacheConf; + this.fileSize = fileSize; + this.path = path; + this.name = path.getName(); this.conf = conf; - trailer.expectMajorVersion(getMajorVersion()); - validateMinorVersion(path, trailer.getMinorVersion()); + checkFileVersion(); this.hfileContext = createHFileContext(fsdis, fileSize, hfs, path, trailer); - HFileBlock.FSReaderImpl fsBlockReaderV2 = - new HFileBlock.FSReaderImpl(fsdis, fileSize, hfs, path, hfileContext); - this.fsBlockReader = fsBlockReaderV2; // upcast + this.fsBlockReader = new HFileBlock.FSReaderImpl(fsdis, fileSize, hfs, path, hfileContext); // Comparator class name is stored in the trailer in version 2. 
comparator = trailer.createComparator(); @@ -139,7 +193,7 @@ public class HFileReaderV2 extends AbstractHFileReader { // Parse load-on-open data. - HFileBlock.BlockIterator blockIter = fsBlockReaderV2.blockRange( + HFileBlock.BlockIterator blockIter = fsBlockReader.blockRange( trailer.getLoadOnOpenDataOffset(), fileSize - trailer.getTrailerSize()); @@ -158,23 +212,22 @@ public class HFileReaderV2 extends AbstractHFileReader { fileInfo = new FileInfo(); fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream()); byte[] creationTimeBytes = fileInfo.get(FileInfo.CREATE_TIME_TS); - this.hfileContext.setFileCreateTime(creationTimeBytes == null ? 0 : Bytes.toLong(creationTimeBytes)); + this.hfileContext.setFileCreateTime(creationTimeBytes == null ? + 0 : Bytes.toLong(creationTimeBytes)); lastKey = fileInfo.get(FileInfo.LASTKEY); avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN)); avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN)); - byte [] keyValueFormatVersion = - fileInfo.get(HFileWriterV2.KEY_VALUE_VERSION); + byte [] keyValueFormatVersion = fileInfo.get(HFileWriterImpl.KEY_VALUE_VERSION); includesMemstoreTS = keyValueFormatVersion != null && - Bytes.toInt(keyValueFormatVersion) == - HFileWriterV2.KEY_VALUE_VER_WITH_MEMSTORE; - fsBlockReaderV2.setIncludesMemstoreTS(includesMemstoreTS); + Bytes.toInt(keyValueFormatVersion) == HFileWriterImpl.KEY_VALUE_VER_WITH_MEMSTORE; + fsBlockReader.setIncludesMemstoreTS(includesMemstoreTS); if (includesMemstoreTS) { - decodeMemstoreTS = Bytes.toLong(fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY)) > 0; + decodeMemstoreTS = Bytes.toLong(fileInfo.get(HFileWriterImpl.MAX_MEMSTORE_TS_KEY)) > 0; } // Read data block encoding algorithm name from file info. dataBlockEncoder = HFileDataBlockEncoderImpl.createFromFileInfo(fileInfo); - fsBlockReaderV2.setDataBlockEncoder(dataBlockEncoder); + fsBlockReader.setDataBlockEncoder(dataBlockEncoder); // Store all other load-on-open blocks for further consumption. HFileBlock b; @@ -217,38 +270,760 @@ public class HFileReaderV2 extends AbstractHFileReader { } }); } - } - protected HFileContext createHFileContext(FSDataInputStreamWrapper fsdis, long fileSize, - HFileSystem hfs, Path path, FixedFileTrailer trailer) throws IOException { - return new HFileContextBuilder() - .withIncludesMvcc(this.includesMemstoreTS) - .withCompression(this.compressAlgo) - .withHBaseCheckSum(trailer.getMinorVersion() >= MINOR_VERSION_WITH_CHECKSUM) - .build(); + byte[] tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN); + // max tag length is not present in the HFile means tags were not at all written to file. + if (tmp != null) { + hfileContext.setIncludesTags(true); + tmp = fileInfo.get(FileInfo.TAGS_COMPRESSED); + if (tmp != null && Bytes.toBoolean(tmp)) { + hfileContext.setCompressTags(true); + } + } } /** - * Create a Scanner on this file. No seeks or reads are done on creation. Call - * {@link HFileScanner#seekTo(byte[])} to position an start the read. There is - * nothing to clean up in a Scanner. Letting go of your references to the - * scanner is sufficient. - * - * @param cacheBlocks True if we should cache blocks read in by this scanner. - * @param pread Use positional read rather than seek+read if true (pread is - * better for random reads, seek+read is better scanning). - * @param isCompaction is scanner being used for a compaction? - * @return Scanner on this file. + * File version check is a little sloppy. 
We read v3 files but can also read v2 files if their + * content has been pb'd; files written with 0.98. */ - @Override - public HFileScanner getScanner(boolean cacheBlocks, final boolean pread, - final boolean isCompaction) { - if (dataBlockEncoder.useEncodedScanner()) { - return new EncodedScannerV2(this, cacheBlocks, pread, isCompaction, - hfileContext); + private void checkFileVersion() { + int majorVersion = trailer.getMajorVersion(); + if (majorVersion == getMajorVersion()) return; + int minorVersion = trailer.getMinorVersion(); + if (majorVersion == 2 && minorVersion >= MIN_V2_MINOR_VERSION_WITH_PB) return; + // We can read v3 or v2 versions of hfile. + throw new IllegalArgumentException("Invalid HFile version: major=" + + trailer.getMajorVersion() + ", minor=" + trailer.getMinorVersion() + ": expected at least " + + "major=2 and minor=" + MAX_MINOR_VERSION); + } + + @SuppressWarnings("serial") + public static class BlockIndexNotLoadedException extends IllegalStateException { + public BlockIndexNotLoadedException() { + // Add a message in case anyone relies on it as opposed to class name. + super("Block index not loaded"); + } + } + + private String toStringFirstKey() { + return KeyValue.keyToString(getFirstKey()); + } + + private String toStringLastKey() { + return KeyValue.keyToString(getLastKey()); + } + + @Override + public String toString() { + return "reader=" + path.toString() + + (!isFileInfoLoaded()? "": + ", compression=" + compressAlgo.getName() + + ", cacheConf=" + cacheConf + + ", firstKey=" + toStringFirstKey() + + ", lastKey=" + toStringLastKey()) + + ", avgKeyLen=" + avgKeyLen + + ", avgValueLen=" + avgValueLen + + ", entries=" + trailer.getEntryCount() + + ", length=" + fileSize; + } + + @Override + public long length() { + return fileSize; + } + + /** + * @return the first key in the file. May be null if file has no entries. Note + * that this is not the first row key, but rather the byte form of the + * first KeyValue. + */ + @Override + public byte [] getFirstKey() { + if (dataBlockIndexReader == null) { + throw new BlockIndexNotLoadedException(); + } + return dataBlockIndexReader.isEmpty() ? null + : dataBlockIndexReader.getRootBlockKey(0); + } + + /** + * TODO left from {@link HFile} version 1: move this to StoreFile after Ryan's + * patch goes in to eliminate {@link KeyValue} here. + * + * @return the first row key, or null if the file is empty. + */ + @Override + public byte[] getFirstRowKey() { + byte[] firstKey = getFirstKey(); + return firstKey == null? null: KeyValue.createKeyValueFromKey(firstKey).getRow(); + } + + /** + * TODO left from {@link HFile} version 1: move this to StoreFile after + * Ryan's patch goes in to eliminate {@link KeyValue} here. + * + * @return the last row key, or null if the file is empty. + */ + @Override + public byte[] getLastRowKey() { + byte[] lastKey = getLastKey(); + return lastKey == null? null: KeyValue.createKeyValueFromKey(lastKey).getRow(); + } + + /** @return number of KV entries in this HFile */ + @Override + public long getEntries() { + return trailer.getEntryCount(); + } + + /** @return comparator */ + @Override + public KVComparator getComparator() { + return comparator; + } + + /** @return compression algorithm */ + @Override + public Compression.Algorithm getCompressionAlgorithm() { + return compressAlgo; + } + + /** + * @return the total heap size of data and meta block indexes in bytes. Does + * not take into account non-root blocks of a multilevel data index. 
+ */ + public long indexSize() { + return (dataBlockIndexReader != null ? dataBlockIndexReader.heapSize() : 0) + + ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize() + : 0); + } + + @Override + public String getName() { + return name; + } + + @Override + public HFileBlockIndex.BlockIndexReader getDataBlockIndexReader() { + return dataBlockIndexReader; + } + + @Override + public FixedFileTrailer getTrailer() { + return trailer; + } + + @Override + public FileInfo loadFileInfo() throws IOException { + return fileInfo; + } + + /** + * An exception thrown when an operation requiring a scanner to be seeked + * is invoked on a scanner that is not seeked. + */ + @SuppressWarnings("serial") + public static class NotSeekedException extends IllegalStateException { + public NotSeekedException() { + super("Not seeked to a key/value"); + } + } + + protected static class HFileScannerImpl implements HFileScanner { + private ByteBuffer blockBuffer; + protected final boolean cacheBlocks; + protected final boolean pread; + protected final boolean isCompaction; + private int currKeyLen; + private int currValueLen; + private int currMemstoreTSLen; + private long currMemstoreTS; + // Updated but never read? + protected volatile int blockFetches; + protected final HFile.Reader reader; + private int currTagsLen; + + protected HFileBlock block; + + /** + * The next indexed key is to keep track of the indexed key of the next data block. + * If the nextIndexedKey is HConstants.NO_NEXT_INDEXED_KEY, it means that the + * current data block is the last data block. + * + * If the nextIndexedKey is null, it means the nextIndexedKey has not been loaded yet. + */ + protected Cell nextIndexedKey; + + public HFileScannerImpl(final HFile.Reader reader, final boolean cacheBlocks, + final boolean pread, final boolean isCompaction) { + this.reader = reader; + this.cacheBlocks = cacheBlocks; + this.pread = pread; + this.isCompaction = isCompaction; } - return new ScannerV2(this, cacheBlocks, pread, isCompaction); + @Override + public boolean isSeeked(){ + return blockBuffer != null; + } + + @Override + public String toString() { + return "HFileScanner for reader " + String.valueOf(getReader()); + } + + protected void assertSeeked() { + if (!isSeeked()) + throw new NotSeekedException(); + } + + @Override + public HFile.Reader getReader() { + return reader; + } + + protected int getCellBufSize() { + int kvBufSize = KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen; + if (this.reader.getFileContext().isIncludesTags()) { + kvBufSize += Bytes.SIZEOF_SHORT + currTagsLen; + } + return kvBufSize; + } + + protected int getNextCellStartPosition() { + int nextKvPos = blockBuffer.position() + KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen + + currMemstoreTSLen; + if (this.reader.getFileContext().isIncludesTags()) { + nextKvPos += Bytes.SIZEOF_SHORT + currTagsLen; + } + return nextKvPos; + } + + protected void readKeyValueLen() { + blockBuffer.mark(); + currKeyLen = blockBuffer.getInt(); + currValueLen = blockBuffer.getInt(); + if (currKeyLen < 0 || currValueLen < 0 || currKeyLen > blockBuffer.limit() + || currValueLen > blockBuffer.limit()) { + throw new IllegalStateException("Invalid currKeyLen " + currKeyLen + " or currValueLen " + + currValueLen + ". 
Block offset: " + + block.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " + + blockBuffer.position() + " (without header)."); + } + ByteBufferUtils.skip(blockBuffer, currKeyLen + currValueLen); + if (this.reader.getFileContext().isIncludesTags()) { + // Read short as unsigned, high byte first + currTagsLen = ((blockBuffer.get() & 0xff) << 8) ^ (blockBuffer.get() & 0xff); + if (currTagsLen < 0 || currTagsLen > blockBuffer.limit()) { + throw new IllegalStateException("Invalid currTagsLen " + currTagsLen + ". Block offset: " + + block.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " + + blockBuffer.position() + " (without header)."); + } + ByteBufferUtils.skip(blockBuffer, currTagsLen); + } + readMvccVersion(); + blockBuffer.reset(); + } + + /** + * Within a loaded block, seek looking for the last key that is smaller than + * (or equal to?) the key we are interested in. + * A note on the seekBefore: if you have seekBefore = true, AND the first + * key in the block = key, then you'll get thrown exceptions. The caller has + * to check for that case and load the previous block as appropriate. + * @param key + * the key to find + * @param seekBefore + * find the key before the given key in case of exact match. + * @return 0 in case of an exact key match, 1 in case of an inexact match, + * -2 in case of an inexact match and furthermore, the input key + * less than the first key of current block(e.g. using a faked index + * key) + */ + protected int blockSeek(Cell key, boolean seekBefore) { + int klen, vlen, tlen = 0; + long memstoreTS = 0; + int memstoreTSLen = 0; + int lastKeyValueSize = -1; + KeyValue.KeyOnlyKeyValue keyOnlyKv = new KeyValue.KeyOnlyKeyValue(); + do { + blockBuffer.mark(); + klen = blockBuffer.getInt(); + vlen = blockBuffer.getInt(); + if (klen < 0 || vlen < 0 || klen > blockBuffer.limit() + || vlen > blockBuffer.limit()) { + throw new IllegalStateException("Invalid klen " + klen + " or vlen " + + vlen + ". Block offset: " + + block.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " + + blockBuffer.position() + " (without header)."); + } + ByteBufferUtils.skip(blockBuffer, klen + vlen); + if (this.reader.getFileContext().isIncludesTags()) { + // Read short as unsigned, high byte first + tlen = ((blockBuffer.get() & 0xff) << 8) ^ (blockBuffer.get() & 0xff); + if (tlen < 0 || tlen > blockBuffer.limit()) { + throw new IllegalStateException("Invalid tlen " + tlen + ". 
Block offset: " + + block.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " + + blockBuffer.position() + " (without header)."); + } + ByteBufferUtils.skip(blockBuffer, tlen); + } + if (this.reader.shouldIncludeMemstoreTS()) { + if (this.reader.isDecodeMemstoreTS()) { + memstoreTS = Bytes.readAsVLong(blockBuffer.array(), blockBuffer.arrayOffset() + + blockBuffer.position()); + memstoreTSLen = WritableUtils.getVIntSize(memstoreTS); + } else { + memstoreTS = 0; + memstoreTSLen = 1; + } + } + blockBuffer.reset(); + int keyOffset = blockBuffer.arrayOffset() + blockBuffer.position() + (Bytes.SIZEOF_INT * 2); + keyOnlyKv.setKey(blockBuffer.array(), keyOffset, klen); + int comp = reader.getComparator().compareOnlyKeyPortion(key, keyOnlyKv); + + if (comp == 0) { + if (seekBefore) { + if (lastKeyValueSize < 0) { + throw new IllegalStateException("blockSeek with seekBefore " + + "at the first key of the block: key=" + + CellUtil.getCellKeyAsString(key) + + ", blockOffset=" + block.getOffset() + ", onDiskSize=" + + block.getOnDiskSizeWithHeader()); + } + blockBuffer.position(blockBuffer.position() - lastKeyValueSize); + readKeyValueLen(); + return 1; // non exact match. + } + currKeyLen = klen; + currValueLen = vlen; + currTagsLen = tlen; + if (this.reader.shouldIncludeMemstoreTS()) { + currMemstoreTS = memstoreTS; + currMemstoreTSLen = memstoreTSLen; + } + return 0; // indicate exact match + } else if (comp < 0) { + if (lastKeyValueSize > 0) + blockBuffer.position(blockBuffer.position() - lastKeyValueSize); + readKeyValueLen(); + if (lastKeyValueSize == -1 && blockBuffer.position() == 0) { + return HConstants.INDEX_KEY_MAGIC; + } + return 1; + } + + // The size of this key/value tuple, including key/value length fields. + lastKeyValueSize = klen + vlen + memstoreTSLen + KEY_VALUE_LEN_SIZE; + // include tag length also if tags included with KV + if (this.reader.getFileContext().isIncludesTags()) { + lastKeyValueSize += tlen + Bytes.SIZEOF_SHORT; + } + blockBuffer.position(blockBuffer.position() + lastKeyValueSize); + } while (blockBuffer.remaining() > 0); + + // Seek to the last key we successfully read. This will happen if this is + // the last key/value pair in the file, in which case the following call + // to next() has to return false. + blockBuffer.position(blockBuffer.position() - lastKeyValueSize); + readKeyValueLen(); + return 1; // didn't exactly find it. + } + + @Override + public Cell getNextIndexedKey() { + return nextIndexedKey; + } + + @Override + public int seekTo(Cell key) throws IOException { + return seekTo(key, true); + } + + @Override + public int reseekTo(Cell key) throws IOException { + int compared; + if (isSeeked()) { + compared = compareKey(reader.getComparator(), key); + if (compared < 1) { + // If the required key is less than or equal to current key, then + // don't do anything. + return compared; + } else { + // The comparison with no_next_index_key has to be checked + if (this.nextIndexedKey != null && + (this.nextIndexedKey == HConstants.NO_NEXT_INDEXED_KEY || reader + .getComparator().compareOnlyKeyPortion(key, nextIndexedKey) < 0)) { + // The reader shall continue to scan the current data block instead + // of querying the + // block index as long as it knows the target key is strictly + // smaller than + // the next indexed key or the current data block is the last data + // block. 
+ return loadBlockAndSeekToKey(this.block, nextIndexedKey, false, key, false); + } + } + } + // Don't rewind on a reseek operation, because reseek implies that we are + // always going forward in the file. + return seekTo(key, false); + } + + /** + * An internal API function. Seek to the given key, optionally rewinding to + * the first key of the block before doing the seek. + * + * @param key - a cell representing the key that we need to fetch + * @param rewind whether to rewind to the first key of the block before + * doing the seek. If this is false, we are assuming we never go + * back, otherwise the result is undefined. + * @return -1 if the key is earlier than the first key of the file, + * 0 if we are at the given key, 1 if we are past the given key + * -2 if the key is earlier than the first key of the file while + * using a faked index key + * @throws IOException + */ + public int seekTo(Cell key, boolean rewind) throws IOException { + HFileBlockIndex.BlockIndexReader indexReader = reader.getDataBlockIndexReader(); + BlockWithScanInfo blockWithScanInfo = indexReader.loadDataBlockWithScanInfo(key, block, + cacheBlocks, pread, isCompaction, getEffectiveDataBlockEncoding()); + if (blockWithScanInfo == null || blockWithScanInfo.getHFileBlock() == null) { + // This happens if the key e.g. falls before the beginning of the file. + return -1; + } + return loadBlockAndSeekToKey(blockWithScanInfo.getHFileBlock(), + blockWithScanInfo.getNextIndexedKey(), rewind, key, false); + } + + @Override + public boolean seekBefore(Cell key) throws IOException { + HFileBlock seekToBlock = reader.getDataBlockIndexReader().seekToDataBlock(key, block, + cacheBlocks, pread, isCompaction, reader.getEffectiveEncodingInCache(isCompaction)); + if (seekToBlock == null) { + return false; + } + ByteBuffer firstKey = getFirstKeyInBlock(seekToBlock); + + if (reader.getComparator() + .compareOnlyKeyPortion( + new KeyValue.KeyOnlyKeyValue(firstKey.array(), firstKey.arrayOffset(), + firstKey.limit()), key) >= 0) { + long previousBlockOffset = seekToBlock.getPrevBlockOffset(); + // The key we are interested in + if (previousBlockOffset == -1) { + // we have a 'problem', the key we want is the first of the file. + return false; + } + + // It is important that we compute and pass onDiskSize to the block + // reader so that it does not have to read the header separately to + // figure out the size. + seekToBlock = reader.readBlock(previousBlockOffset, + seekToBlock.getOffset() - previousBlockOffset, cacheBlocks, + pread, isCompaction, true, BlockType.DATA, getEffectiveDataBlockEncoding()); + // TODO shortcut: seek forward in this block to the last key of the + // block. + } + Cell firstKeyInCurrentBlock = new KeyValue.KeyOnlyKeyValue(Bytes.getBytes(firstKey)); + loadBlockAndSeekToKey(seekToBlock, firstKeyInCurrentBlock, true, key, true); + return true; + } + + /** + * Scans blocks in the "scanned" section of the {@link HFile} until the next + * data block is found. 
+ * + * @return the next block, or null if there are no more data blocks + * @throws IOException + */ + protected HFileBlock readNextDataBlock() throws IOException { + long lastDataBlockOffset = reader.getTrailer().getLastDataBlockOffset(); + if (block == null) + return null; + + HFileBlock curBlock = block; + + do { + if (curBlock.getOffset() >= lastDataBlockOffset) + return null; + + if (curBlock.getOffset() < 0) { + throw new IOException("Invalid block file offset: " + block); + } + + // We are reading the next block without block type validation, because + // it might turn out to be a non-data block. + curBlock = reader.readBlock(curBlock.getOffset() + + curBlock.getOnDiskSizeWithHeader(), + curBlock.getNextBlockOnDiskSizeWithHeader(), cacheBlocks, pread, + isCompaction, true, null, getEffectiveDataBlockEncoding()); + } while (!curBlock.getBlockType().isData()); + + return curBlock; + } + + public DataBlockEncoding getEffectiveDataBlockEncoding() { + return this.reader.getEffectiveEncodingInCache(isCompaction); + } + + @Override + public Cell getKeyValue() { + if (!isSeeked()) + return null; + + KeyValue ret = new KeyValue(blockBuffer.array(), blockBuffer.arrayOffset() + + blockBuffer.position(), getCellBufSize()); + if (this.reader.shouldIncludeMemstoreTS()) { + ret.setSequenceId(currMemstoreTS); + } + return ret; + } + + @Override + public ByteBuffer getKey() { + assertSeeked(); + return ByteBuffer.wrap( + blockBuffer.array(), + blockBuffer.arrayOffset() + blockBuffer.position() + + KEY_VALUE_LEN_SIZE, currKeyLen).slice(); + } + + public int compareKey(KVComparator comparator, byte[] key, int offset, int length) { + return comparator.compareFlatKey(key, offset, length, blockBuffer.array(), + blockBuffer.arrayOffset() + blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen); + } + + @Override + public ByteBuffer getValue() { + assertSeeked(); + return ByteBuffer.wrap( + blockBuffer.array(), + blockBuffer.arrayOffset() + blockBuffer.position() + + KEY_VALUE_LEN_SIZE + currKeyLen, currValueLen).slice(); + } + + protected void setNonSeekedState() { + block = null; + blockBuffer = null; + currKeyLen = 0; + currValueLen = 0; + currMemstoreTS = 0; + currMemstoreTSLen = 0; + currTagsLen = 0; + } + + /** + * Go to the next key/value in the block section. Loads the next block if + * necessary. If successful, {@link #getKey()} and {@link #getValue()} can + * be called. + * + * @return true if successfully navigated to the next key/value + */ + @Override + public boolean next() throws IOException { + assertSeeked(); + + try { + blockBuffer.position(getNextCellStartPosition()); + } catch (IllegalArgumentException e) { + LOG.error("Current pos = " + blockBuffer.position() + + "; currKeyLen = " + currKeyLen + "; currValLen = " + + currValueLen + "; block limit = " + blockBuffer.limit() + + "; HFile name = " + reader.getName() + + "; currBlock currBlockOffset = " + block.getOffset()); + throw e; + } + + if (blockBuffer.remaining() <= 0) { + long lastDataBlockOffset = + reader.getTrailer().getLastDataBlockOffset(); + + if (block.getOffset() >= lastDataBlockOffset) { + setNonSeekedState(); + return false; + } + + // read the next block + HFileBlock nextBlock = readNextDataBlock(); + if (nextBlock == null) { + setNonSeekedState(); + return false; + } + + updateCurrBlock(nextBlock); + return true; + } + + // We are still in the same block. + readKeyValueLen(); + return true; + } + + /** + * Positions this scanner at the start of the file. + * + * @return false if empty file; i.e. 
a call to next would return false and + * the current key and value are undefined. + * @throws IOException + */ + @Override + public boolean seekTo() throws IOException { + if (reader == null) { + return false; + } + + if (reader.getTrailer().getEntryCount() == 0) { + // No data blocks. + return false; + } + + long firstDataBlockOffset = + reader.getTrailer().getFirstDataBlockOffset(); + if (block != null && block.getOffset() == firstDataBlockOffset) { + blockBuffer.rewind(); + readKeyValueLen(); + return true; + } + + block = reader.readBlock(firstDataBlockOffset, -1, cacheBlocks, pread, + isCompaction, true, BlockType.DATA, getEffectiveDataBlockEncoding()); + if (block.getOffset() < 0) { + throw new IOException("Invalid block offset: " + block.getOffset()); + } + updateCurrBlock(block); + return true; + } + + protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey, + boolean rewind, Cell key, boolean seekBefore) throws IOException { + if (block == null || block.getOffset() != seekToBlock.getOffset()) { + updateCurrBlock(seekToBlock); + } else if (rewind) { + blockBuffer.rewind(); + } + + // Update the nextIndexedKey + this.nextIndexedKey = nextIndexedKey; + return blockSeek(key, seekBefore); + } + + /** + * Updates the current block to be the given {@link HFileBlock}. Seeks to + * the the first key/value pair. + * + * @param newBlock the block to make current + */ + protected void updateCurrBlock(HFileBlock newBlock) { + block = newBlock; + + // sanity check + if (block.getBlockType() != BlockType.DATA) { + throw new IllegalStateException("Scanner works only on data " + + "blocks, got " + block.getBlockType() + "; " + + "fileName=" + reader.getName() + ", " + + "dataBlockEncoder=" + reader.getDataBlockEncoding() + ", " + + "isCompaction=" + isCompaction); + } + + blockBuffer = block.getBufferWithoutHeader(); + readKeyValueLen(); + blockFetches++; + + // Reset the next indexed key + this.nextIndexedKey = null; + } + + protected void readMvccVersion() { + if (this.reader.shouldIncludeMemstoreTS()) { + if (this.reader.isDecodeMemstoreTS()) { + currMemstoreTS = Bytes.readAsVLong(blockBuffer.array(), blockBuffer.arrayOffset() + + blockBuffer.position()); + currMemstoreTSLen = WritableUtils.getVIntSize(currMemstoreTS); + } else { + currMemstoreTS = 0; + currMemstoreTSLen = 1; + } + } + } + + protected ByteBuffer getFirstKeyInBlock(HFileBlock curBlock) { + ByteBuffer buffer = curBlock.getBufferWithoutHeader(); + // It is safe to manipulate this buffer because we own the buffer object. 
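
readKeyValueLen() and getFirstKeyInBlock() both walk the same flat cell layout inside a data block: a 4-byte key length, a 4-byte value length, the key bytes, the value bytes, then (when tags are enabled) a 2-byte unsigned tag length and the tag bytes, followed by an mvcc vlong when memstore timestamps are stored. The self-contained sketch below writes and re-reads one cell in that layout using only java.nio; the mvcc field is omitted to keep it short, and the key text is purely illustrative.

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    public class FlatCellLayout {
        public static void main(String[] args) {
            byte[] key = "row1/cf:q/1".getBytes(StandardCharsets.UTF_8); // illustrative key bytes
            byte[] value = "v1".getBytes(StandardCharsets.UTF_8);
            byte[] tags = new byte[0];

            // Writing side: [int keyLen][int valueLen][key][value][short tagsLen][tags]
            ByteBuffer block = ByteBuffer.allocate(4 + 4 + key.length + value.length + 2 + tags.length);
            block.putInt(key.length).putInt(value.length).put(key).put(value)
                 .putShort((short) tags.length).put(tags);
            block.flip();

            // Reading side: mirror of readKeyValueLen(), bounds checks omitted.
            int keyLen = block.getInt();
            int valueLen = block.getInt();
            byte[] k = new byte[keyLen];
            byte[] v = new byte[valueLen];
            block.get(k).get(v);
            int tagsLen = block.getShort() & 0xffff; // two bytes, high byte first

            System.out.println(new String(k, StandardCharsets.UTF_8) + " -> "
                + new String(v, StandardCharsets.UTF_8) + " (tagsLen=" + tagsLen + ")");
        }
    }
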
+ buffer.rewind(); + int klen = buffer.getInt(); + buffer.getInt(); + ByteBuffer keyBuff = buffer.slice(); + keyBuff.limit(klen); + keyBuff.rewind(); + return keyBuff; + } + + @Override + public String getKeyString() { + return Bytes.toStringBinary(blockBuffer.array(), + blockBuffer.arrayOffset() + blockBuffer.position() + + KEY_VALUE_LEN_SIZE, currKeyLen); + } + + @Override + public String getValueString() { + return Bytes.toString(blockBuffer.array(), blockBuffer.arrayOffset() + + blockBuffer.position() + KEY_VALUE_LEN_SIZE + currKeyLen, + currValueLen); + } + + public int compareKey(KVComparator comparator, Cell key) { + return comparator.compareOnlyKeyPortion( + key, + new KeyValue.KeyOnlyKeyValue(blockBuffer.array(), blockBuffer.arrayOffset() + + blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen)); + } + } + + public Path getPath() { + return path; + } + + @Override + public DataBlockEncoding getDataBlockEncoding() { + return dataBlockEncoder.getDataBlockEncoding(); + } + + @Override + public Configuration getConf() { + return conf; + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + /** Minor versions in HFile starting with this number have hbase checksums */ + public static final int MINOR_VERSION_WITH_CHECKSUM = 1; + /** In HFile minor version that does not support checksums */ + public static final int MINOR_VERSION_NO_CHECKSUM = 0; + + /** HFile minor version that introduced pbuf filetrailer */ + public static final int PBUF_TRAILER_MINOR_VERSION = 2; + + /** + * The size of a (key length, value length) tuple that prefixes each entry in + * a data block. + */ + public final static int KEY_VALUE_LEN_SIZE = 2 * Bytes.SIZEOF_INT; + + protected boolean includesMemstoreTS = false; + protected boolean decodeMemstoreTS = false; + + + public boolean isDecodeMemstoreTS() { + return this.decodeMemstoreTS; + } + + public boolean shouldIncludeMemstoreTS() { + return includesMemstoreTS; } /** @@ -279,16 +1054,16 @@ public class HFileReaderV2 extends AbstractHFileReader { // perform this check if cached block is a data block. if (cachedBlock.getBlockType().isData() && !actualDataBlockEncoding.equals(expectedDataBlockEncoding)) { - // This mismatch may happen if a ScannerV2, which is used for say a + // This mismatch may happen if a Scanner, which is used for say a // compaction, tries to read an encoded block from the block cache. - // The reverse might happen when an EncodedScannerV2 tries to read + // The reverse might happen when an EncodedScanner tries to read // un-encoded blocks which were cached earlier. // // Because returning a data block with an implicit BlockType mismatch // will cause the requesting scanner to throw a disk read should be // forced here. This will potentially cause a significant number of // cache misses, so update so we should keep track of this as it might - // justify the work on a CompoundScannerV2. + // justify the work on a CompoundScanner. 
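
The cache-hit path above only hands back a cached data block when its encoding is compatible with what the requesting scanner expects; an encoded/unencoded mismatch falls through to a disk read. A simplified, stand-alone sketch of that rule follows; the enum is a stand-in for DataBlockEncoding, and the real method additionally special-cases the NONE encoding and evicts the mismatched block, as the code that continues below shows.

    public class CachedBlockCheck {
        enum Encoding { NONE, PREFIX, FAST_DIFF } // stand-in for DataBlockEncoding

        /** True when a cached data block with this encoding can satisfy the request. */
        static boolean canServeFromCache(Encoding actual, Encoding expected) {
            if (expected == null || actual == expected) {
                return true;                 // caller does not care, or encodings agree
            }
            // Mismatch, e.g. a compaction scanner wanting unencoded blocks while the
            // cache holds encoded ones (or vice versa): fall back to a disk read.
            return false;
        }

        public static void main(String[] args) {
            System.out.println(canServeFromCache(Encoding.PREFIX, Encoding.PREFIX)); // true
            System.out.println(canServeFromCache(Encoding.PREFIX, Encoding.NONE));   // false
        }
    }
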
if (!expectedDataBlockEncoding.equals(DataBlockEncoding.NONE) && !actualDataBlockEncoding.equals(DataBlockEncoding.NONE)) { // If the block is encoded but the encoding does not match the @@ -310,6 +1085,7 @@ public class HFileReaderV2 extends AbstractHFileReader { } return null; } + /** * @param metaBlockName * @param cacheBlock Add block to cache, if found @@ -380,15 +1156,17 @@ public class HFileReaderV2 extends AbstractHFileReader { throw new IOException("Requested block is out of range: " + dataBlockOffset + ", lastDataBlockOffset: " + trailer.getLastDataBlockOffset()); } - - // For any given block from any given file, synchronize reads for said block. + // For any given block from any given file, synchronize reads for said + // block. // Without a cache, this synchronizing is needless overhead, but really // the other choice is to duplicate work (which the cache would prevent you // from doing). + BlockCacheKey cacheKey = new BlockCacheKey(name, dataBlockOffset); + boolean useLock = false; IdLock.Entry lockEntry = null; - TraceScope traceScope = Trace.startSpan("HFileReaderV2.readBlock"); + TraceScope traceScope = Trace.startSpan("HFileReaderImpl.readBlock"); try { while (true) { if (useLock) { @@ -402,6 +1180,9 @@ public class HFileReaderV2 extends AbstractHFileReader { HFileBlock cachedBlock = getCachedBlock(cacheKey, cacheBlock, useLock, isCompaction, updateCacheMetrics, expectedBlockType, expectedDataBlockEncoding); if (cachedBlock != null) { + if (Trace.isTracing()) { + traceScope.getSpan().addTimelineAnnotation("blockCacheHit"); + } assert cachedBlock.isUnpacked() : "Packed block leak."; if (cachedBlock.getBlockType().isData()) { if (updateCacheMetrics) { @@ -529,565 +1310,23 @@ public class HFileReaderV2 extends AbstractHFileReader { } /** For testing */ - @Override - HFileBlock.FSReader getUncachedBlockReader() { + public HFileBlock.FSReader getUncachedBlockReader() { return fsBlockReader; } - - protected abstract static class AbstractScannerV2 - extends AbstractHFileReader.Scanner { - protected HFileBlock block; - - /** - * The next indexed key is to keep track of the indexed key of the next data block. - * If the nextIndexedKey is HConstants.NO_NEXT_INDEXED_KEY, it means that the - * current data block is the last data block. - * - * If the nextIndexedKey is null, it means the nextIndexedKey has not been loaded yet. - */ - protected byte[] nextIndexedKey; - - public AbstractScannerV2(HFileReaderV2 r, boolean cacheBlocks, - final boolean pread, final boolean isCompaction) { - super(r, cacheBlocks, pread, isCompaction); - } - - protected abstract ByteBuffer getFirstKeyInBlock(HFileBlock curBlock); - - protected abstract int loadBlockAndSeekToKey(HFileBlock seekToBlock, byte[] nextIndexedKey, - boolean rewind, Cell key, boolean seekBefore) throws IOException; - - @Override - public int seekTo(byte[] key, int offset, int length) throws IOException { - // Always rewind to the first key of the block, because the given key - // might be before or after the current key. 
- return seekTo(new KeyValue.KeyOnlyKeyValue(key, offset, length)); - } - - @Override - public int reseekTo(byte[] key, int offset, int length) throws IOException { - return reseekTo(new KeyValue.KeyOnlyKeyValue(key, offset, length)); - } - - @Override - public int seekTo(Cell key) throws IOException { - return seekTo(key, true); - } - - @Override - public int reseekTo(Cell key) throws IOException { - int compared; - if (isSeeked()) { - compared = compareKey(reader.getComparator(), key); - if (compared < 1) { - // If the required key is less than or equal to current key, then - // don't do anything. - return compared; - } else { - // The comparison with no_next_index_key has to be checked - if (this.nextIndexedKey != null && - (this.nextIndexedKey == HConstants.NO_NEXT_INDEXED_KEY || reader - .getComparator() - .compareOnlyKeyPortion(key, - new KeyValue.KeyOnlyKeyValue(nextIndexedKey, 0, - nextIndexedKey.length)) < 0)) { - // The reader shall continue to scan the current data block instead - // of querying the - // block index as long as it knows the target key is strictly - // smaller than - // the next indexed key or the current data block is the last data - // block. - return loadBlockAndSeekToKey(this.block, nextIndexedKey, false, key, false); - } - } - } - // Don't rewind on a reseek operation, because reseek implies that we are - // always going forward in the file. - return seekTo(key, false); - } - - - /** - * An internal API function. Seek to the given key, optionally rewinding to - * the first key of the block before doing the seek. - * - * @param key - a cell representing the key that we need to fetch - * @param rewind whether to rewind to the first key of the block before - * doing the seek. If this is false, we are assuming we never go - * back, otherwise the result is undefined. - * @return -1 if the key is earlier than the first key of the file, - * 0 if we are at the given key, 1 if we are past the given key - * -2 if the key is earlier than the first key of the file while - * using a faked index key - * @throws IOException - */ - public int seekTo(Cell key, boolean rewind) throws IOException { - HFileBlockIndex.BlockIndexReader indexReader = reader.getDataBlockIndexReader(); - BlockWithScanInfo blockWithScanInfo = indexReader.loadDataBlockWithScanInfo(key, block, - cacheBlocks, pread, isCompaction, getEffectiveDataBlockEncoding()); - if (blockWithScanInfo == null || blockWithScanInfo.getHFileBlock() == null) { - // This happens if the key e.g. falls before the beginning of the file. 
- return -1; - } - return loadBlockAndSeekToKey(blockWithScanInfo.getHFileBlock(), - blockWithScanInfo.getNextIndexedKey(), rewind, key, false); - } - - @Override - public boolean seekBefore(byte[] key, int offset, int length) throws IOException { - return seekBefore(new KeyValue.KeyOnlyKeyValue(key, offset, length)); - } - - @Override - public boolean seekBefore(Cell key) throws IOException { - HFileBlock seekToBlock = reader.getDataBlockIndexReader().seekToDataBlock(key, block, - cacheBlocks, pread, isCompaction, - ((HFileReaderV2) reader).getEffectiveEncodingInCache(isCompaction)); - if (seekToBlock == null) { - return false; - } - ByteBuffer firstKey = getFirstKeyInBlock(seekToBlock); - - if (reader.getComparator() - .compareOnlyKeyPortion( - new KeyValue.KeyOnlyKeyValue(firstKey.array(), firstKey.arrayOffset(), - firstKey.limit()), key) >= 0) { - long previousBlockOffset = seekToBlock.getPrevBlockOffset(); - // The key we are interested in - if (previousBlockOffset == -1) { - // we have a 'problem', the key we want is the first of the file. - return false; - } - - // It is important that we compute and pass onDiskSize to the block - // reader so that it does not have to read the header separately to - // figure out the size. - seekToBlock = reader.readBlock(previousBlockOffset, - seekToBlock.getOffset() - previousBlockOffset, cacheBlocks, - pread, isCompaction, true, BlockType.DATA, getEffectiveDataBlockEncoding()); - // TODO shortcut: seek forward in this block to the last key of the - // block. - } - byte[] firstKeyInCurrentBlock = Bytes.getBytes(firstKey); - loadBlockAndSeekToKey(seekToBlock, firstKeyInCurrentBlock, true, key, true); - return true; - } - - /** - * Scans blocks in the "scanned" section of the {@link HFile} until the next - * data block is found. - * - * @return the next block, or null if there are no more data blocks - * @throws IOException - */ - protected HFileBlock readNextDataBlock() throws IOException { - long lastDataBlockOffset = reader.getTrailer().getLastDataBlockOffset(); - if (block == null) - return null; - - HFileBlock curBlock = block; - - do { - if (curBlock.getOffset() >= lastDataBlockOffset) - return null; - - if (curBlock.getOffset() < 0) { - throw new IOException("Invalid block file offset: " + block); - } - - // We are reading the next block without block type validation, because - // it might turn out to be a non-data block. - curBlock = reader.readBlock(curBlock.getOffset() - + curBlock.getOnDiskSizeWithHeader(), - curBlock.getNextBlockOnDiskSizeWithHeader(), cacheBlocks, pread, - isCompaction, true, null, getEffectiveDataBlockEncoding()); - } while (!curBlock.getBlockType().isData()); - - return curBlock; - } - - public DataBlockEncoding getEffectiveDataBlockEncoding() { - return ((HFileReaderV2)reader).getEffectiveEncodingInCache(isCompaction); - } - /** - * Compare the given key against the current key - * @param comparator - * @param key - * @param offset - * @param length - * @return -1 is the passed key is smaller than the current key, 0 if equal and 1 if greater - */ - public abstract int compareKey(KVComparator comparator, byte[] key, int offset, - int length); - - public abstract int compareKey(KVComparator comparator, Cell kv); - } - /** - * Implementation of {@link HFileScanner} interface. + * Scanner that operates on encoded data blocks. 
*/ - protected static class ScannerV2 extends AbstractScannerV2 { - private HFileReaderV2 reader; - - public ScannerV2(HFileReaderV2 r, boolean cacheBlocks, - final boolean pread, final boolean isCompaction) { - super(r, cacheBlocks, pread, isCompaction); - this.reader = r; - } - - @Override - public Cell getKeyValue() { - if (!isSeeked()) - return null; - - KeyValue ret = new KeyValue(blockBuffer.array(), blockBuffer.arrayOffset() - + blockBuffer.position(), getCellBufSize()); - if (this.reader.shouldIncludeMemstoreTS()) { - ret.setSequenceId(currMemstoreTS); - } - return ret; - } - - protected int getCellBufSize() { - return KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen; - } - - @Override - public ByteBuffer getKey() { - assertSeeked(); - return ByteBuffer.wrap( - blockBuffer.array(), - blockBuffer.arrayOffset() + blockBuffer.position() - + KEY_VALUE_LEN_SIZE, currKeyLen).slice(); - } - - @Override - public int compareKey(KVComparator comparator, byte[] key, int offset, int length) { - return comparator.compareFlatKey(key, offset, length, blockBuffer.array(), - blockBuffer.arrayOffset() + blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen); - } - - @Override - public ByteBuffer getValue() { - assertSeeked(); - return ByteBuffer.wrap( - blockBuffer.array(), - blockBuffer.arrayOffset() + blockBuffer.position() - + KEY_VALUE_LEN_SIZE + currKeyLen, currValueLen).slice(); - } - - protected void setNonSeekedState() { - block = null; - blockBuffer = null; - currKeyLen = 0; - currValueLen = 0; - currMemstoreTS = 0; - currMemstoreTSLen = 0; - } - - /** - * Go to the next key/value in the block section. Loads the next block if - * necessary. If successful, {@link #getKey()} and {@link #getValue()} can - * be called. - * - * @return true if successfully navigated to the next key/value - */ - @Override - public boolean next() throws IOException { - assertSeeked(); - - try { - blockBuffer.position(getNextCellStartPosition()); - } catch (IllegalArgumentException e) { - LOG.error("Current pos = " + blockBuffer.position() - + "; currKeyLen = " + currKeyLen + "; currValLen = " - + currValueLen + "; block limit = " + blockBuffer.limit() - + "; HFile name = " + reader.getName() - + "; currBlock currBlockOffset = " + block.getOffset()); - throw e; - } - - if (blockBuffer.remaining() <= 0) { - long lastDataBlockOffset = - reader.getTrailer().getLastDataBlockOffset(); - - if (block.getOffset() >= lastDataBlockOffset) { - setNonSeekedState(); - return false; - } - - // read the next block - HFileBlock nextBlock = readNextDataBlock(); - if (nextBlock == null) { - setNonSeekedState(); - return false; - } - - updateCurrBlock(nextBlock); - return true; - } - - // We are still in the same block. - readKeyValueLen(); - return true; - } - - protected int getNextCellStartPosition() { - return blockBuffer.position() + KEY_VALUE_LEN_SIZE + currKeyLen + currValueLen - + currMemstoreTSLen; - } - - /** - * Positions this scanner at the start of the file. - * - * @return false if empty file; i.e. a call to next would return false and - * the current key and value are undefined. - * @throws IOException - */ - @Override - public boolean seekTo() throws IOException { - if (reader == null) { - return false; - } - - if (reader.getTrailer().getEntryCount() == 0) { - // No data blocks. 
- return false; - } - - long firstDataBlockOffset = - reader.getTrailer().getFirstDataBlockOffset(); - if (block != null && block.getOffset() == firstDataBlockOffset) { - blockBuffer.rewind(); - readKeyValueLen(); - return true; - } - - block = reader.readBlock(firstDataBlockOffset, -1, cacheBlocks, pread, - isCompaction, true, BlockType.DATA, getEffectiveDataBlockEncoding()); - if (block.getOffset() < 0) { - throw new IOException("Invalid block offset: " + block.getOffset()); - } - updateCurrBlock(block); - return true; - } - - @Override - protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, byte[] nextIndexedKey, - boolean rewind, Cell key, boolean seekBefore) throws IOException { - if (block == null || block.getOffset() != seekToBlock.getOffset()) { - updateCurrBlock(seekToBlock); - } else if (rewind) { - blockBuffer.rewind(); - } - - // Update the nextIndexedKey - this.nextIndexedKey = nextIndexedKey; - return blockSeek(key, seekBefore); - } - - /** - * Updates the current block to be the given {@link HFileBlock}. Seeks to - * the the first key/value pair. - * - * @param newBlock the block to make current - */ - protected void updateCurrBlock(HFileBlock newBlock) { - block = newBlock; - - // sanity check - if (block.getBlockType() != BlockType.DATA) { - throw new IllegalStateException("ScannerV2 works only on data " + - "blocks, got " + block.getBlockType() + "; " + - "fileName=" + reader.name + ", " + - "dataBlockEncoder=" + reader.dataBlockEncoder + ", " + - "isCompaction=" + isCompaction); - } - - blockBuffer = block.getBufferWithoutHeader(); - readKeyValueLen(); - blockFetches++; - - // Reset the next indexed key - this.nextIndexedKey = null; - } - - protected void readKeyValueLen() { - blockBuffer.mark(); - currKeyLen = blockBuffer.getInt(); - currValueLen = blockBuffer.getInt(); - ByteBufferUtils.skip(blockBuffer, currKeyLen + currValueLen); - readMvccVersion(); - if (currKeyLen < 0 || currValueLen < 0 - || currKeyLen > blockBuffer.limit() - || currValueLen > blockBuffer.limit()) { - throw new IllegalStateException("Invalid currKeyLen " + currKeyLen - + " or currValueLen " + currValueLen + ". Block offset: " - + block.getOffset() + ", block length: " + blockBuffer.limit() - + ", position: " + blockBuffer.position() + " (without header)."); - } - blockBuffer.reset(); - } - - protected void readMvccVersion() { - if (this.reader.shouldIncludeMemstoreTS()) { - if (this.reader.decodeMemstoreTS) { - try { - currMemstoreTS = Bytes.readVLong(blockBuffer.array(), blockBuffer.arrayOffset() - + blockBuffer.position()); - currMemstoreTSLen = WritableUtils.getVIntSize(currMemstoreTS); - } catch (Exception e) { - throw new RuntimeException("Error reading memstore timestamp", e); - } - } else { - currMemstoreTS = 0; - currMemstoreTSLen = 1; - } - } - } - - /** - * Within a loaded block, seek looking for the last key that is smaller than - * (or equal to?) the key we are interested in. - * - * A note on the seekBefore: if you have seekBefore = true, AND the first - * key in the block = key, then you'll get thrown exceptions. The caller has - * to check for that case and load the previous block as appropriate. - * - * @param key - * the key to find - * @param seekBefore - * find the key before the given key in case of exact match. - * @return 0 in case of an exact key match, 1 in case of an inexact match, - * -2 in case of an inexact match and furthermore, the input key - * less than the first key of current block(e.g. 
using a faked index - * key) - */ - protected int blockSeek(Cell key, boolean seekBefore) { - int klen, vlen; - long memstoreTS = 0; - int memstoreTSLen = 0; - int lastKeyValueSize = -1; - KeyValue.KeyOnlyKeyValue keyOnlykv = new KeyValue.KeyOnlyKeyValue(); - do { - blockBuffer.mark(); - klen = blockBuffer.getInt(); - vlen = blockBuffer.getInt(); - blockBuffer.reset(); - if (this.reader.shouldIncludeMemstoreTS()) { - if (this.reader.decodeMemstoreTS) { - try { - int memstoreTSOffset = blockBuffer.arrayOffset() + blockBuffer.position() - + KEY_VALUE_LEN_SIZE + klen + vlen; - memstoreTS = Bytes.readVLong(blockBuffer.array(), memstoreTSOffset); - memstoreTSLen = WritableUtils.getVIntSize(memstoreTS); - } catch (Exception e) { - throw new RuntimeException("Error reading memstore timestamp", e); - } - } else { - memstoreTS = 0; - memstoreTSLen = 1; - } - } - - int keyOffset = blockBuffer.arrayOffset() + blockBuffer.position() + KEY_VALUE_LEN_SIZE; - keyOnlykv.setKey(blockBuffer.array(), keyOffset, klen); - int comp = reader.getComparator().compareOnlyKeyPortion(key, keyOnlykv); - - if (comp == 0) { - if (seekBefore) { - if (lastKeyValueSize < 0) { - throw new IllegalStateException("blockSeek with seekBefore " - + "at the first key of the block: key=" - + CellUtil.getCellKeyAsString(key) - + ", blockOffset=" + block.getOffset() + ", onDiskSize=" - + block.getOnDiskSizeWithHeader()); - } - blockBuffer.position(blockBuffer.position() - lastKeyValueSize); - readKeyValueLen(); - return 1; // non exact match. - } - currKeyLen = klen; - currValueLen = vlen; - if (this.reader.shouldIncludeMemstoreTS()) { - currMemstoreTS = memstoreTS; - currMemstoreTSLen = memstoreTSLen; - } - return 0; // indicate exact match - } else if (comp < 0) { - if (lastKeyValueSize > 0) - blockBuffer.position(blockBuffer.position() - lastKeyValueSize); - readKeyValueLen(); - if (lastKeyValueSize == -1 && blockBuffer.position() == 0 - && this.reader.trailer.getMinorVersion() >= MINOR_VERSION_WITH_FAKED_KEY) { - return HConstants.INDEX_KEY_MAGIC; - } - return 1; - } - - // The size of this key/value tuple, including key/value length fields. - lastKeyValueSize = klen + vlen + memstoreTSLen + KEY_VALUE_LEN_SIZE; - blockBuffer.position(blockBuffer.position() + lastKeyValueSize); - } while (blockBuffer.remaining() > 0); - - // Seek to the last key we successfully read. This will happen if this is - // the last key/value pair in the file, in which case the following call - // to next() has to return false. - blockBuffer.position(blockBuffer.position() - lastKeyValueSize); - readKeyValueLen(); - return 1; // didn't exactly find it. - } - - @Override - protected ByteBuffer getFirstKeyInBlock(HFileBlock curBlock) { - ByteBuffer buffer = curBlock.getBufferWithoutHeader(); - // It is safe to manipulate this buffer because we own the buffer object. 
- buffer.rewind(); - int klen = buffer.getInt(); - buffer.getInt(); - ByteBuffer keyBuff = buffer.slice(); - keyBuff.limit(klen); - keyBuff.rewind(); - return keyBuff; - } - - @Override - public String getKeyString() { - return Bytes.toStringBinary(blockBuffer.array(), - blockBuffer.arrayOffset() + blockBuffer.position() - + KEY_VALUE_LEN_SIZE, currKeyLen); - } - - @Override - public String getValueString() { - return Bytes.toString(blockBuffer.array(), blockBuffer.arrayOffset() - + blockBuffer.position() + KEY_VALUE_LEN_SIZE + currKeyLen, - currValueLen); - } - - @Override - public int compareKey(KVComparator comparator, Cell key) { - return comparator.compareOnlyKeyPortion( - key, - new KeyValue.KeyOnlyKeyValue(blockBuffer.array(), blockBuffer.arrayOffset() - + blockBuffer.position() + KEY_VALUE_LEN_SIZE, currKeyLen)); - } - } - - /** - * ScannerV2 that operates on encoded data blocks. - */ - protected static class EncodedScannerV2 extends AbstractScannerV2 { + protected static class EncodedScanner extends HFileScannerImpl { private final HFileBlockDecodingContext decodingCtx; private final DataBlockEncoder.EncodedSeeker seeker; private final DataBlockEncoder dataBlockEncoder; protected final HFileContext meta; - public EncodedScannerV2(HFileReaderV2 reader, boolean cacheBlocks, + public EncodedScanner(HFile.Reader reader, boolean cacheBlocks, boolean pread, boolean isCompaction, HFileContext meta) { super(reader, cacheBlocks, pread, isCompaction); - DataBlockEncoding encoding = reader.dataBlockEncoder.getDataBlockEncoding(); + DataBlockEncoding encoding = reader.getDataBlockEncoding(); dataBlockEncoder = encoding.getEncoder(); decodingCtx = dataBlockEncoder.newDataBlockDecodingContext(meta); seeker = dataBlockEncoder.createSeeker( @@ -1186,7 +1425,6 @@ public class HFileReaderV2 extends AbstractHFileReader { return seeker.getKeyDeepCopy(); } - @Override public int compareKey(KVComparator comparator, byte[] key, int offset, int length) { return seeker.compareKey(comparator, key, offset, length); } @@ -1225,13 +1463,11 @@ public class HFileReaderV2 extends AbstractHFileReader { } } - @Override protected ByteBuffer getFirstKeyInBlock(HFileBlock curBlock) { return dataBlockEncoder.getFirstKeyInBlock(getEncodedBuffer(curBlock)); } - @Override - protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, byte[] nextIndexedKey, + protected int loadBlockAndSeekToKey(HFileBlock seekToBlock, Cell nextIndexedKey, boolean rewind, Cell key, boolean seekBefore) throws IOException { if (block == null || block.getOffset() != seekToBlock.getOffset()) { updateCurrentBlock(seekToBlock); @@ -1242,7 +1478,6 @@ public class HFileReaderV2 extends AbstractHFileReader { return seeker.seekToKeyInBlock(key, seekBefore); } - @Override public int compareKey(KVComparator comparator, Cell key) { return seeker.compareKey(comparator, key); } @@ -1276,7 +1511,6 @@ public class HFileReaderV2 extends AbstractHFileReader { return null; } - @Override public boolean isFileInfoLoaded() { return true; // We load file info in constructor in version 2. 
} @@ -1297,11 +1531,6 @@ public class HFileReaderV2 extends AbstractHFileReader { } } - @Override - public int getMajorVersion() { - return 2; - } - @Override public HFileContext getFileContext() { return hfileContext; @@ -1312,7 +1541,108 @@ public class HFileReaderV2 extends AbstractHFileReader { * not completed, true otherwise */ @VisibleForTesting - boolean prefetchComplete() { + public boolean prefetchComplete() { return PrefetchExecutor.isCompleted(path); } + + protected HFileContext createHFileContext(FSDataInputStreamWrapper fsdis, long fileSize, + HFileSystem hfs, Path path, FixedFileTrailer trailer) throws IOException { + HFileContextBuilder builder = new HFileContextBuilder() + .withIncludesMvcc(this.includesMemstoreTS) + .withHBaseCheckSum(true) + .withCompression(this.compressAlgo); + + // Check for any key material available + byte[] keyBytes = trailer.getEncryptionKey(); + if (keyBytes != null) { + Encryption.Context cryptoContext = Encryption.newContext(conf); + Key key; + String masterKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, + User.getCurrent().getShortName()); + try { + // First try the master key + key = EncryptionUtil.unwrapKey(conf, masterKeyName, keyBytes); + } catch (KeyException e) { + // If the current master key fails to unwrap, try the alternate, if + // one is configured + if (LOG.isDebugEnabled()) { + LOG.debug("Unable to unwrap key with current master key '" + masterKeyName + "'"); + } + String alternateKeyName = + conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY); + if (alternateKeyName != null) { + try { + key = EncryptionUtil.unwrapKey(conf, alternateKeyName, keyBytes); + } catch (KeyException ex) { + throw new IOException(ex); + } + } else { + throw new IOException(e); + } + } + // Use the algorithm the key wants + Cipher cipher = Encryption.getCipher(conf, key.getAlgorithm()); + if (cipher == null) { + throw new IOException("Cipher '" + key.getAlgorithm() + "' is not available"); + } + cryptoContext.setCipher(cipher); + cryptoContext.setKey(key); + builder.withEncryptionContext(cryptoContext); + } + + HFileContext context = builder.build(); + + if (LOG.isTraceEnabled()) { + LOG.trace("Reader" + (path != null ? " for " + path : "" ) + + " initialized with cacheConf: " + cacheConf + + " comparator: " + comparator.getClass().getSimpleName() + + " fileContext: " + context); + } + + return context; + } + + /** + * Create a Scanner on this file. No seeks or reads are done on creation. Call + * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is + * nothing to clean up in a Scanner. Letting go of your references to the + * scanner is sufficient. NOTE: Do not use this overload of getScanner for + * compactions. See {@link #getScanner(boolean, boolean, boolean)} + * + * @param cacheBlocks True if we should cache blocks read in by this scanner. + * @param pread Use positional read rather than seek+read if true (pread is + * better for random reads, seek+read is better scanning). + * @return Scanner on this file. + */ + @Override + public HFileScanner getScanner(boolean cacheBlocks, final boolean pread) { + return getScanner(cacheBlocks, pread, false); + } + + /** + * Create a Scanner on this file. No seeks or reads are done on creation. Call + * {@link HFileScanner#seekTo(Cell)} to position an start the read. There is + * nothing to clean up in a Scanner. Letting go of your references to the + * scanner is sufficient. + * @param cacheBlocks + * True if we should cache blocks read in by this scanner. 
+ * @param pread + * Use positional read rather than seek+read if true (pread is better + * for random reads, seek+read is better scanning). + * @param isCompaction + * is scanner being used for a compaction? + * @return Scanner on this file. + */ + @Override + public HFileScanner getScanner(boolean cacheBlocks, final boolean pread, + final boolean isCompaction) { + if (dataBlockEncoder.useEncodedScanner()) { + return new EncodedScanner(this, cacheBlocks, pread, isCompaction, this.hfileContext); + } + return new HFileScannerImpl(this, cacheBlocks, pread, isCompaction); + } + + public int getMajorVersion() { + return 3; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java deleted file mode 100644 index b28d8c1a875..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV3.java +++ /dev/null @@ -1,358 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.io.hfile; - -import java.io.IOException; -import java.security.Key; -import java.security.KeyException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.fs.HFileSystem; -import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; -import org.apache.hadoop.hbase.io.crypto.Cipher; -import org.apache.hadoop.hbase.io.crypto.Encryption; -import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; -import org.apache.hadoop.hbase.security.EncryptionUtil; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.util.ByteBufferUtils; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.WritableUtils; - -/** - * {@link HFile} reader for version 3. - */ -@InterfaceAudience.Private -public class HFileReaderV3 extends HFileReaderV2 { - - private static final Log LOG = LogFactory.getLog(HFileReaderV3.class); - - public static final int MAX_MINOR_VERSION = 0; - - /** - * Opens a HFile. You must load the index before you can use it by calling - * {@link #loadFileInfo()}. - * @param path - * Path to HFile. - * @param trailer - * File trailer. - * @param fsdis - * input stream. - * @param size - * Length of the stream. - * @param cacheConf - * Cache configuration. - * @param hfs - * The file system. 
- * @param conf - * Configuration - */ - public HFileReaderV3(final Path path, FixedFileTrailer trailer, final FSDataInputStreamWrapper fsdis, - final long size, final CacheConfig cacheConf, final HFileSystem hfs, - final Configuration conf) throws IOException { - super(path, trailer, fsdis, size, cacheConf, hfs, conf); - byte[] tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN); - // max tag length is not present in the HFile means tags were not at all written to file. - if (tmp != null) { - hfileContext.setIncludesTags(true); - tmp = fileInfo.get(FileInfo.TAGS_COMPRESSED); - if (tmp != null && Bytes.toBoolean(tmp)) { - hfileContext.setCompressTags(true); - } - } - } - - @Override - protected HFileContext createHFileContext(FSDataInputStreamWrapper fsdis, long fileSize, - HFileSystem hfs, Path path, FixedFileTrailer trailer) throws IOException { - trailer.expectMajorVersion(3); - HFileContextBuilder builder = new HFileContextBuilder() - .withIncludesMvcc(this.includesMemstoreTS) - .withHBaseCheckSum(true) - .withCompression(this.compressAlgo); - - // Check for any key material available - byte[] keyBytes = trailer.getEncryptionKey(); - if (keyBytes != null) { - Encryption.Context cryptoContext = Encryption.newContext(conf); - Key key; - String masterKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, - User.getCurrent().getShortName()); - try { - // First try the master key - key = EncryptionUtil.unwrapKey(conf, masterKeyName, keyBytes); - } catch (KeyException e) { - // If the current master key fails to unwrap, try the alternate, if - // one is configured - if (LOG.isDebugEnabled()) { - LOG.debug("Unable to unwrap key with current master key '" + masterKeyName + "'"); - } - String alternateKeyName = - conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY); - if (alternateKeyName != null) { - try { - key = EncryptionUtil.unwrapKey(conf, alternateKeyName, keyBytes); - } catch (KeyException ex) { - throw new IOException(ex); - } - } else { - throw new IOException(e); - } - } - // Use the algorithm the key wants - Cipher cipher = Encryption.getCipher(conf, key.getAlgorithm()); - if (cipher == null) { - throw new IOException("Cipher '" + key.getAlgorithm() + "' is not available"); - } - cryptoContext.setCipher(cipher); - cryptoContext.setKey(key); - builder.withEncryptionContext(cryptoContext); - } - - HFileContext context = builder.build(); - - if (LOG.isTraceEnabled()) { - LOG.trace("Reader" + (path != null ? " for " + path : "" ) + - " initialized with cacheConf: " + cacheConf + - " comparator: " + comparator.getClass().getSimpleName() + - " fileContext: " + context); - } - - return context; - } - - /** - * Create a Scanner on this file. No seeks or reads are done on creation. Call - * {@link HFileScanner#seekTo(byte[])} to position an start the read. There is - * nothing to clean up in a Scanner. Letting go of your references to the - * scanner is sufficient. - * @param cacheBlocks - * True if we should cache blocks read in by this scanner. - * @param pread - * Use positional read rather than seek+read if true (pread is better - * for random reads, seek+read is better scanning). - * @param isCompaction - * is scanner being used for a compaction? - * @return Scanner on this file. 
- */ - @Override - public HFileScanner getScanner(boolean cacheBlocks, final boolean pread, - final boolean isCompaction) { - if (dataBlockEncoder.useEncodedScanner()) { - return new EncodedScannerV3(this, cacheBlocks, pread, isCompaction, this.hfileContext); - } - return new ScannerV3(this, cacheBlocks, pread, isCompaction); - } - - /** - * Implementation of {@link HFileScanner} interface. - */ - protected static class ScannerV3 extends ScannerV2 { - - private HFileReaderV3 reader; - private int currTagsLen; - - public ScannerV3(HFileReaderV3 r, boolean cacheBlocks, final boolean pread, - final boolean isCompaction) { - super(r, cacheBlocks, pread, isCompaction); - this.reader = r; - } - - @Override - protected int getCellBufSize() { - int kvBufSize = super.getCellBufSize(); - if (reader.hfileContext.isIncludesTags()) { - kvBufSize += Bytes.SIZEOF_SHORT + currTagsLen; - } - return kvBufSize; - } - - protected void setNonSeekedState() { - super.setNonSeekedState(); - currTagsLen = 0; - } - - @Override - protected int getNextCellStartPosition() { - int nextKvPos = super.getNextCellStartPosition(); - if (reader.hfileContext.isIncludesTags()) { - nextKvPos += Bytes.SIZEOF_SHORT + currTagsLen; - } - return nextKvPos; - } - - protected void readKeyValueLen() { - blockBuffer.mark(); - currKeyLen = blockBuffer.getInt(); - currValueLen = blockBuffer.getInt(); - if (currKeyLen < 0 || currValueLen < 0 || currKeyLen > blockBuffer.limit() - || currValueLen > blockBuffer.limit()) { - throw new IllegalStateException("Invalid currKeyLen " + currKeyLen + " or currValueLen " - + currValueLen + ". Block offset: " - + block.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " - + blockBuffer.position() + " (without header)."); - } - ByteBufferUtils.skip(blockBuffer, currKeyLen + currValueLen); - if (reader.hfileContext.isIncludesTags()) { - // Read short as unsigned, high byte first - currTagsLen = ((blockBuffer.get() & 0xff) << 8) ^ (blockBuffer.get() & 0xff); - if (currTagsLen < 0 || currTagsLen > blockBuffer.limit()) { - throw new IllegalStateException("Invalid currTagsLen " + currTagsLen + ". Block offset: " - + block.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " - + blockBuffer.position() + " (without header)."); - } - ByteBufferUtils.skip(blockBuffer, currTagsLen); - } - readMvccVersion(); - blockBuffer.reset(); - } - - /** - * Within a loaded block, seek looking for the last key that is smaller than - * (or equal to?) the key we are interested in. - * A note on the seekBefore: if you have seekBefore = true, AND the first - * key in the block = key, then you'll get thrown exceptions. The caller has - * to check for that case and load the previous block as appropriate. - * @param key - * the key to find - * @param seekBefore - * find the key before the given key in case of exact match. - * @return 0 in case of an exact key match, 1 in case of an inexact match, - * -2 in case of an inexact match and furthermore, the input key - * less than the first key of current block(e.g. 
using a faked index - * key) - */ - @Override - protected int blockSeek(Cell key, boolean seekBefore) { - int klen, vlen, tlen = 0; - long memstoreTS = 0; - int memstoreTSLen = 0; - int lastKeyValueSize = -1; - KeyValue.KeyOnlyKeyValue keyOnlyKv = new KeyValue.KeyOnlyKeyValue(); - do { - blockBuffer.mark(); - klen = blockBuffer.getInt(); - vlen = blockBuffer.getInt(); - if (klen < 0 || vlen < 0 || klen > blockBuffer.limit() - || vlen > blockBuffer.limit()) { - throw new IllegalStateException("Invalid klen " + klen + " or vlen " - + vlen + ". Block offset: " - + block.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " - + blockBuffer.position() + " (without header)."); - } - ByteBufferUtils.skip(blockBuffer, klen + vlen); - if (reader.hfileContext.isIncludesTags()) { - // Read short as unsigned, high byte first - tlen = ((blockBuffer.get() & 0xff) << 8) ^ (blockBuffer.get() & 0xff); - if (tlen < 0 || tlen > blockBuffer.limit()) { - throw new IllegalStateException("Invalid tlen " + tlen + ". Block offset: " - + block.getOffset() + ", block length: " + blockBuffer.limit() + ", position: " - + blockBuffer.position() + " (without header)."); - } - ByteBufferUtils.skip(blockBuffer, tlen); - } - if (this.reader.shouldIncludeMemstoreTS()) { - if (this.reader.decodeMemstoreTS) { - try { - memstoreTS = Bytes.readVLong(blockBuffer.array(), blockBuffer.arrayOffset() - + blockBuffer.position()); - memstoreTSLen = WritableUtils.getVIntSize(memstoreTS); - } catch (Exception e) { - throw new RuntimeException("Error reading memstore timestamp", e); - } - } else { - memstoreTS = 0; - memstoreTSLen = 1; - } - } - blockBuffer.reset(); - int keyOffset = blockBuffer.arrayOffset() + blockBuffer.position() + (Bytes.SIZEOF_INT * 2); - keyOnlyKv.setKey(blockBuffer.array(), keyOffset, klen); - int comp = reader.getComparator().compareOnlyKeyPortion(key, keyOnlyKv); - - if (comp == 0) { - if (seekBefore) { - if (lastKeyValueSize < 0) { - throw new IllegalStateException("blockSeek with seekBefore " - + "at the first key of the block: key=" - + CellUtil.getCellKeyAsString(key) - + ", blockOffset=" + block.getOffset() + ", onDiskSize=" - + block.getOnDiskSizeWithHeader()); - } - blockBuffer.position(blockBuffer.position() - lastKeyValueSize); - readKeyValueLen(); - return 1; // non exact match. - } - currKeyLen = klen; - currValueLen = vlen; - currTagsLen = tlen; - if (this.reader.shouldIncludeMemstoreTS()) { - currMemstoreTS = memstoreTS; - currMemstoreTSLen = memstoreTSLen; - } - return 0; // indicate exact match - } else if (comp < 0) { - if (lastKeyValueSize > 0) - blockBuffer.position(blockBuffer.position() - lastKeyValueSize); - readKeyValueLen(); - if (lastKeyValueSize == -1 && blockBuffer.position() == 0) { - return HConstants.INDEX_KEY_MAGIC; - } - return 1; - } - - // The size of this key/value tuple, including key/value length fields. - lastKeyValueSize = klen + vlen + memstoreTSLen + KEY_VALUE_LEN_SIZE; - // include tag length also if tags included with KV - if (reader.hfileContext.isIncludesTags()) { - lastKeyValueSize += tlen + Bytes.SIZEOF_SHORT; - } - blockBuffer.position(blockBuffer.position() + lastKeyValueSize); - } while (blockBuffer.remaining() > 0); - - // Seek to the last key we successfully read. This will happen if this is - // the last key/value pair in the file, in which case the following call - // to next() has to return false. - blockBuffer.position(blockBuffer.position() - lastKeyValueSize); - readKeyValueLen(); - return 1; // didn't exactly find it. 
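
Both the new HFileScannerImpl.blockSeek and the ScannerV2/ScannerV3 versions being removed here implement the same within-block seek: walk the cells in order, remember the previous one so that seekBefore can step back on an exact match, and report 0 for an exact match or 1 for an inexact one. The stand-alone sketch below mirrors that walk over sorted String keys; where the real code returns HConstants.INDEX_KEY_MAGIC for a faked index key that sorts before the whole block, the sketch simply returns -1.

    public class BlockSeekSketch {
        /** Returns 0 for an exact match, 1 for an inexact match (positioned on the
         *  last key <= target), and -1 when the target sorts before every key. */
        static int blockSeek(String[] blockKeys, String target, boolean seekBefore, int[] posOut) {
            int prev = -1;
            for (int i = 0; i < blockKeys.length; i++) {
                int cmp = target.compareTo(blockKeys[i]);
                if (cmp == 0) {
                    if (seekBefore) {
                        if (prev < 0) {
                            throw new IllegalStateException("seekBefore at first key of block");
                        }
                        posOut[0] = prev;      // step back one entry
                        return 1;
                    }
                    posOut[0] = i;
                    return 0;
                }
                if (cmp < 0) {                 // target falls before this key
                    if (prev < 0) return -1;   // before the whole block
                    posOut[0] = prev;
                    return 1;
                }
                prev = i;
            }
            posOut[0] = prev;                  // target is past the last key of the block
            return 1;
        }

        public static void main(String[] args) {
            String[] keys = {"b", "d", "f"};
            int[] pos = new int[1];
            System.out.println(blockSeek(keys, "d", false, pos) + " @ " + keys[pos[0]]); // 0 @ d
            System.out.println(blockSeek(keys, "e", false, pos) + " @ " + keys[pos[0]]); // 1 @ d
            System.out.println(blockSeek(keys, "a", false, pos));                        // -1
        }
    }
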
- } - } - - /** - * ScannerV3 that operates on encoded data blocks. - */ - protected static class EncodedScannerV3 extends EncodedScannerV2 { - public EncodedScannerV3(HFileReaderV3 reader, boolean cacheBlocks, boolean pread, - boolean isCompaction, HFileContext context) { - super(reader, cacheBlocks, pread, isCompaction, context); - } - } - - @Override - public int getMajorVersion() { - return 3; - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java index 1ad91e3d02d..2b6e011e072 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.Cell; * *
A scanner doesn't always have a key/value that it is pointing to * when it is first created and before - * {@link #seekTo()}/{@link #seekTo(byte[])} are called. + * {@link #seekTo()}/{@link #seekTo(Cell)} are called. * In this case, {@link #getKey()}/{@link #getValue()} returns null. At most * other times, a key and value will be available. The general pattern is that * you position the Scanner using the seekTo variants and then getKey and * @@ -39,69 +39,57 @@ import org.apache.hadoop.hbase.Cell; @InterfaceAudience.Private public interface HFileScanner { /** - * SeekTo or just before the passed key. Examine the return - * code to figure whether we found the key or not. - * Consider the key stream of all the keys in the file, - * k[0] .. k[n], where there are n keys in the file. - * @param key Key to find. - * @return -1, if key < k[0], no position; - * 0, such that k[i] = key and scanner is left in position i; and - * 1, such that k[i] < key, and scanner is left in position i. - * The scanner will position itself between k[i] and k[i+1] where - * k[i] < key <= k[i+1]. - * If there is no key k[i+1] greater than or equal to the input key, then the + * SeekTo or just before the passed cell. Examine the return + * code to figure whether we found the cell or not. + * Consider the cell stream of all the cells in the file, + * c[0] .. c[n], where there are n cells in the file. + * @param cell + * @return -1, if cell < c[0], no position; + * 0, such that c[i] = cell and scanner is left in position i; and + * 1, such that c[i] < cell, and scanner is left in position i. + * The scanner will position itself between c[i] and c[i+1] where + * c[i] < cell <= c[i+1]. + * If there is no cell c[i+1] greater than or equal to the input cell, then the * scanner will position itself at the end of the file and next() will return * false when it is called. * @throws IOException */ - @Deprecated - int seekTo(byte[] key) throws IOException; - @Deprecated - int seekTo(byte[] key, int offset, int length) throws IOException; + int seekTo(Cell cell) throws IOException; - int seekTo(Cell c) throws IOException; /** - * Reseek to or just before the passed key. Similar to seekTo + * Reseek to or just before the passed cell. Similar to seekTo * except that this can be called even if the scanner is not at the beginning * of a file. - * This can be used to seek only to keys which come after the current position + * This can be used to seek only to cells which come after the current position * of the scanner. - * Consider the key stream of all the keys in the file, - * k[0] .. k[n], where there are n keys in the file after + * Consider the cell stream of all the cells in the file, + * c[0] .. c[n], where there are n cells in the file after * current position of HFileScanner. - * The scanner will position itself between k[i] and k[i+1] where - * k[i] < key <= k[i+1]. - * If there is no key k[i+1] greater than or equal to the input key, then the + * The scanner will position itself between c[i] and c[i+1] where + * c[i] < cell <= c[i+1]. + * If there is no cell c[i+1] greater than or equal to the input cell, then the * scanner will position itself at the end of the file and next() will return * false when it is called. - * @param key Key to find (should be non-null) - * @return -1, if key < k[0], no position; - * 0, such that k[i] = key and scanner is left in position i; and - * 1, such that k[i] < key, and scanner is left in position i.
+ * @param cell Cell to find (should be non-null) + * @return -1, if cell < c[0], no position; + * 0, such that c[i] = cell and scanner is left in position i; and + * 1, such that c[i] < cell, and scanner is left in position i. * @throws IOException */ - @Deprecated - int reseekTo(byte[] key) throws IOException; - @Deprecated - int reseekTo(byte[] key, int offset, int length) throws IOException; + int reseekTo(Cell cell) throws IOException; - int reseekTo(Cell c) throws IOException; /** - * Consider the key stream of all the keys in the file, - * k[0] .. k[n], where there are n keys in the file. - * @param key Key to find - * @return false if key <= k[0] or true with scanner in position 'i' such - * that: k[i] < key. Furthermore: there may be a k[i+1], such that - * k[i] < key <= k[i+1] but there may also NOT be a k[i+1], and next() will + * Consider the cell stream of all the cells in the file, + * c[0] .. c[n], where there are n cells in the file. + * @param cell Cell to find + * @return false if cell <= c[0] or true with scanner in position 'i' such + * that: c[i] < cell. Furthermore: there may be a c[i+1], such that + * c[i] < cell <= c[i+1] but there may also NOT be a c[i+1], and next() will * return false (EOF). * @throws IOException */ - @Deprecated - boolean seekBefore(byte[] key) throws IOException; - @Deprecated - boolean seekBefore(byte[] key, int offset, int length) throws IOException; + boolean seekBefore(Cell cell) throws IOException; - boolean seekBefore(Cell kv) throws IOException; /** * Positions this scanner at the start of the file. * @return False if empty file; i.e. a call to next would return false and @@ -117,14 +105,14 @@ public interface HFileScanner { boolean next() throws IOException; /** * Gets a buffer view to the current key. You must call - * {@link #seekTo(byte[])} before this method. + * {@link #seekTo(Cell)} before this method. * @return byte buffer for the key. The limit is set to the key size, and the * position is 0, the start of the buffer view. */ ByteBuffer getKey(); /** * Gets a buffer view to the current value. You must call - * {@link #seekTo(byte[])} before this method. + * {@link #seekTo(Cell)} before this method. * * @return byte buffer for the value. The limit is set to the value size, and * the position is 0, the start of the buffer view. @@ -136,13 +124,13 @@ public interface HFileScanner { Cell getKeyValue(); /** * Convenience method to get a copy of the key as a string - interpreting the - * bytes as UTF8. You must call {@link #seekTo(byte[])} before this method. + * bytes as UTF8. You must call {@link #seekTo(Cell)} before this method. * @return key as a string */ String getKeyString(); /** * Convenience method to get a copy of the value as a string - interpreting - * the bytes as UTF8. You must call {@link #seekTo(byte[])} before this method. + * the bytes as UTF8. You must call {@link #seekTo(Cell)} before this method. * @return value as a string */ String getValueString(); @@ -152,8 +140,13 @@ public interface HFileScanner { HFile.Reader getReader(); /** * @return True is scanner has had one of the seek calls invoked; i.e. - * {@link #seekBefore(byte[])} or {@link #seekTo()} or {@link #seekTo(byte[])}. + * {@link #seekBefore(Cell)} or {@link #seekTo()} or {@link #seekTo(Cell)}. * Otherwise returns false. 
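To make the Cell-based seek contract above concrete, here is a hedged usage sketch of the reworked interface. The reader construction, file path, and row key are assumptions made for illustration; only the seekTo return-code semantics come from the javadoc above.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileScannerSeekSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.hfile"); // hypothetical HFile
    HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
    try {
      reader.loadFileInfo();
      HFileScanner scanner = reader.getScanner(false /* cacheBlocks */, true /* pread */);
      // Build a seek cell for the start of a row; any Cell carrying the target key works.
      Cell seekCell = KeyValueUtil.createFirstOnRow(Bytes.toBytes("row-42"));
      int res = scanner.seekTo(seekCell);
      if (res == -1 && !scanner.seekTo()) {
        return; // seek cell sorts before every cell and the file is empty
      }
      // res == 0: exact match; res == 1: scanner rests on the last cell <= seekCell.
      do {
        Cell c = scanner.getKeyValue();
        System.out.println(Bytes.toStringBinary(c.getRowArray(), c.getRowOffset(), c.getRowLength()));
      } while (scanner.next());
    } finally {
      reader.close();
    }
  }
}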
*/ boolean isSeeked(); + + /** + * @return the next key in the index (the key to seek to the next block) + */ + Cell getNextIndexedKey(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterFactory.java new file mode 100644 index 00000000000..047022d9043 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterFactory.java @@ -0,0 +1,40 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitationsME + * under the License. + */ +package org.apache.hadoop.hbase.io.hfile; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.KeyValue.KVComparator; + +public class HFileWriterFactory extends HFile.WriterFactory { + HFileWriterFactory(Configuration conf, CacheConfig cacheConf) { + super(conf, cacheConf); + } + + @Override + public HFile.Writer createWriter(FileSystem fs, Path path, FSDataOutputStream ostream, + KVComparator comparator, HFileContext context) + throws IOException { + return new HFileWriterImpl(conf, cacheConf, path, ostream, comparator, context); + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java similarity index 55% rename from hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java rename to hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java index 28c4655271b..05553633a53 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,6 +21,7 @@ package org.apache.hadoop.hbase.io.hfile; import java.io.DataOutput; import java.io.DataOutputStream; import java.io.IOException; +import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.List; @@ -31,40 +31,105 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.io.hfile.HFile.Writer; +import org.apache.hadoop.hbase.io.compress.Compression; +import org.apache.hadoop.hbase.io.crypto.Encryption; +import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; +import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; import org.apache.hadoop.hbase.io.hfile.HFileBlock.BlockWritable; +import org.apache.hadoop.hbase.security.EncryptionUtil; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.BloomFilterWriter; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.io.Writable; /** - * Writes HFile format version 2. + * Common functionality needed by all versions of {@link HFile} writers. */ @InterfaceAudience.Private -public class HFileWriterV2 extends AbstractHFileWriter { - static final Log LOG = LogFactory.getLog(HFileWriterV2.class); +public class HFileWriterImpl implements HFile.Writer { + private static final Log LOG = LogFactory.getLog(HFileWriterImpl.class); - /** Max memstore (mvcc) timestamp in FileInfo */ - public static final byte [] MAX_MEMSTORE_TS_KEY = - Bytes.toBytes("MAX_MEMSTORE_TS_KEY"); + /** The Cell previously appended. Becomes the last cell in the file.*/ + protected Cell lastCell = null; + + /** FileSystem stream to write into. */ + protected FSDataOutputStream outputStream; + + /** True if we opened the outputStream (and so will close it). */ + protected final boolean closeOutputStream; + + /** A "file info" block: a key-value map of file-wide metadata. */ + protected FileInfo fileInfo = new HFile.FileInfo(); + + /** Total # of key/value entries, i.e. how many times add() was called. */ + protected long entryCount = 0; + + /** Used for calculating the average key length. */ + protected long totalKeyLength = 0; + + /** Used for calculating the average value length. */ + protected long totalValueLength = 0; + + /** Total uncompressed bytes, maybe calculate a compression ratio later. */ + protected long totalUncompressedBytes = 0; + + /** Key comparator. Used to ensure we write in order. */ + protected final KVComparator comparator; + + /** Meta block names. */ + protected List metaNames = new ArrayList(); + + /** {@link Writable}s representing meta block data. */ + protected List metaData = new ArrayList(); + + /** + * First cell in a block. + * This reference should be short-lived since we write hfiles in a burst. + */ + protected Cell firstCellInBlock = null; + + + /** May be null if we were passed a stream. */ + protected final Path path; + + /** Cache configuration for caching data on write. 
*/ + protected final CacheConfig cacheConf; + + /** + * Name for this object used when logging or in toString. Is either + * the result of a toString on stream or else name of passed file Path. + */ + protected final String name; + + /** + * The data block encoding which will be used. + * {@link NoOpDataBlockEncoder#INSTANCE} if there is no encoding. + */ + protected final HFileDataBlockEncoder blockEncoder; + + protected final HFileContext hFileContext; + + private int maxTagsLength = 0; /** KeyValue version in FileInfo */ - public static final byte [] KEY_VALUE_VERSION = - Bytes.toBytes("KEY_VALUE_VERSION"); + public static final byte [] KEY_VALUE_VERSION = Bytes.toBytes("KEY_VALUE_VERSION"); /** Version for KeyValue which includes memstore timestamp */ public static final int KEY_VALUE_VER_WITH_MEMSTORE = 1; /** Inline block writers for multi-level block index and compound Blooms. */ - private List inlineBlockWriters = - new ArrayList(); + private List inlineBlockWriters = new ArrayList(); - /** Unified version 2 block writer */ + /** block writer */ protected HFileBlock.Writer fsBlockWriter; private HFileBlockIndex.BlockIndexWriter dataBlockIndexWriter; @@ -83,40 +148,135 @@ public class HFileWriterV2 extends AbstractHFileWriter { private Cell lastCellOfPreviousBlock = null; /** Additional data items to be written to the "load-on-open" section. */ - private List additionalLoadOnOpenData = - new ArrayList(); + private List additionalLoadOnOpenData = new ArrayList(); protected long maxMemstoreTS = 0; - static class WriterFactoryV2 extends HFile.WriterFactory { - WriterFactoryV2(Configuration conf, CacheConfig cacheConf) { - super(conf, cacheConf); + public HFileWriterImpl(final Configuration conf, CacheConfig cacheConf, Path path, + FSDataOutputStream outputStream, + KVComparator comparator, HFileContext fileContext) { + this.outputStream = outputStream; + this.path = path; + this.name = path != null ? path.getName() : outputStream.toString(); + this.hFileContext = fileContext; + DataBlockEncoding encoding = hFileContext.getDataBlockEncoding(); + if (encoding != DataBlockEncoding.NONE) { + this.blockEncoder = new HFileDataBlockEncoderImpl(encoding); + } else { + this.blockEncoder = NoOpDataBlockEncoder.INSTANCE; } + this.comparator = comparator != null ? comparator + : KeyValue.COMPARATOR; - @Override - public Writer createWriter(FileSystem fs, Path path, - FSDataOutputStream ostream, - KVComparator comparator, HFileContext context) throws IOException { - context.setIncludesTags(false);// HFile V2 does not deal with tags at all! - return new HFileWriterV2(conf, cacheConf, fs, path, ostream, - comparator, context); + closeOutputStream = path != null; + this.cacheConf = cacheConf; + finishInit(conf); + if (LOG.isTraceEnabled()) { + LOG.trace("Writer" + (path != null ? " for " + path : "") + + " initialized with cacheConf: " + cacheConf + + " comparator: " + comparator.getClass().getSimpleName() + + " fileContext: " + fileContext); + } + } + + /** + * Add to the file info. All added key/value pairs can be obtained using + * {@link HFile.Reader#loadFileInfo()}. + * + * @param k Key + * @param v Value + * @throws IOException in case the key or the value are invalid + */ + @Override + public void appendFileInfo(final byte[] k, final byte[] v) + throws IOException { + fileInfo.append(k, v, true); + } + + /** + * Sets the file info offset in the trailer, finishes up populating fields in + * the file info, and writes the file info into the given data output. 
The + * reason the data output is not always {@link #outputStream} is that we store + * file info as a block in version 2. + * + * @param trailer fixed file trailer + * @param out the data output to write the file info to + * @throws IOException + */ + protected final void writeFileInfo(FixedFileTrailer trailer, DataOutputStream out) + throws IOException { + trailer.setFileInfoOffset(outputStream.getPos()); + finishFileInfo(); + fileInfo.write(out); + } + + /** + * Checks that the given Cell's key does not violate the key order. + * + * @param cell Cell whose key to check. + * @return true if the key is duplicate + * @throws IOException if the key or the key order is wrong + */ + protected boolean checkKey(final Cell cell) throws IOException { + boolean isDuplicateKey = false; + + if (cell == null) { + throw new IOException("Key cannot be null or empty"); + } + if (lastCell != null) { + int keyComp = comparator.compareOnlyKeyPortion(lastCell, cell); + + if (keyComp > 0) { + throw new IOException("Added a key not lexically larger than" + + " previous. Current cell = " + cell + ", lastCell = " + lastCell); + } else if (keyComp == 0) { + isDuplicateKey = true; } } + return isDuplicateKey; + } - /** Constructor that takes a path, creates and closes the output stream. */ - public HFileWriterV2(Configuration conf, CacheConfig cacheConf, - FileSystem fs, Path path, FSDataOutputStream ostream, - final KVComparator comparator, final HFileContext context) throws IOException { - super(cacheConf, - ostream == null ? createOutputStream(conf, fs, path, null) : ostream, - path, comparator, context); - finishInit(conf); + /** Checks the given value for validity. */ + protected void checkValue(final byte[] value, final int offset, + final int length) throws IOException { + if (value == null) { + throw new IOException("Value cannot be null"); + } + } + + /** + * @return Path or null if we were passed a stream rather than a Path. + */ + @Override + public Path getPath() { + return path; + } + + @Override + public String toString() { + return "writer=" + (path != null ? 
path.toString() : null) + ", name=" + + name + ", compression=" + hFileContext.getCompression().getName(); + } + + public static Compression.Algorithm compressionByName(String algoName) { + if (algoName == null) + return HFile.DEFAULT_COMPRESSION_ALGORITHM; + return Compression.getCompressionAlgorithmByName(algoName); + } + + /** A helper method to create HFile output streams in constructors */ + protected static FSDataOutputStream createOutputStream(Configuration conf, + FileSystem fs, Path path, InetSocketAddress[] favoredNodes) throws IOException { + FsPermission perms = FSUtils.getFilePermissions(fs, conf, + HConstants.DATA_FILE_UMASK_KEY); + return FSUtils.create(fs, path, perms, favoredNodes); } /** Additional initialization steps */ protected void finishInit(final Configuration conf) { - if (fsBlockWriter != null) + if (fsBlockWriter != null) { throw new IllegalStateException("finishInit called twice"); + } fsBlockWriter = new HFileBlock.Writer(blockEncoder, hFileContext); @@ -140,9 +300,7 @@ public class HFileWriterV2 extends AbstractHFileWriter { * @throws IOException */ protected void checkBlockBoundary() throws IOException { - if (fsBlockWriter.blockSizeWritten() < hFileContext.getBlocksize()) - return; - + if (fsBlockWriter.blockSizeWritten() < hFileContext.getBlocksize()) return; finishBlock(); writeInlineBlocks(false); newBlock(); @@ -150,8 +308,7 @@ public class HFileWriterV2 extends AbstractHFileWriter { /** Clean up the current data block */ private void finishBlock() throws IOException { - if (!fsBlockWriter.isWriting() || fsBlockWriter.blockSizeWritten() == 0) - return; + if (!fsBlockWriter.isWriting() || fsBlockWriter.blockSizeWritten() == 0) return; // Update the first data block offset for scanning. if (firstDataBlockOffset == -1) { @@ -161,7 +318,6 @@ public class HFileWriterV2 extends AbstractHFileWriter { lastDataBlockOffset = outputStream.getPos(); fsBlockWriter.writeHeaderAndData(outputStream); int onDiskSize = fsBlockWriter.getOnDiskSizeWithHeader(); - Cell indexEntry = CellComparator.getMidpoint(this.comparator, lastCellOfPreviousBlock, firstCellInBlock); dataBlockIndexWriter.addEntry(CellUtil.getCellKeySerializedAsKeyValueKey(indexEntry), @@ -199,8 +355,7 @@ public class HFileWriterV2 extends AbstractHFileWriter { */ private void doCacheOnWrite(long offset) { HFileBlock cacheFormatBlock = fsBlockWriter.getBlockForCaching(cacheConf); - cacheConf.getBlockCache().cacheBlock( - new BlockCacheKey(name, offset), cacheFormatBlock); + cacheConf.getBlockCache().cacheBlock(new BlockCacheKey(name, offset), cacheFormatBlock); } /** @@ -244,48 +399,6 @@ public class HFileWriterV2 extends AbstractHFileWriter { metaData.add(i, content); } - /** - * Add key/value to file. Keys must be added in an order that agrees with the - * Comparator passed on construction. - * - * @param cell Cell to add. Cannot be empty nor null. - * @throws IOException - */ - @Override - public void append(final Cell cell) throws IOException { - byte[] value = cell.getValueArray(); - int voffset = cell.getValueOffset(); - int vlength = cell.getValueLength(); - // checkKey uses comparator to check we are writing in order. - boolean dupKey = checkKey(cell); - checkValue(value, voffset, vlength); - if (!dupKey) { - checkBlockBoundary(); - } - - if (!fsBlockWriter.isWriting()) { - newBlock(); - } - - fsBlockWriter.write(cell); - - totalKeyLength += CellUtil.estimatedSerializedSizeOfKey(cell); - totalValueLength += vlength; - - // Are we the first key in this block? 
- if (firstCellInBlock == null) { - // If cell is big, block will be closed and this firstCellInBlock reference will only last - // a short while. - firstCellInBlock = cell; - } - - // TODO: What if cell is 10MB and we write infrequently? We'll hold on to the cell here - // indefinetly? - lastCell = cell; - entryCount++; - this.maxMemstoreTS = Math.max(this.maxMemstoreTS, cell.getSequenceId()); - } - @Override public void close() throws IOException { if (outputStream == null) { @@ -409,16 +522,120 @@ public class HFileWriterV2 extends AbstractHFileWriter { }); } - protected int getMajorVersion() { - return 2; - } - - protected int getMinorVersion() { - return HFileReaderV2.MAX_MINOR_VERSION; - } - @Override public HFileContext getFileContext() { return hFileContext; } -} + + /** + * Add key/value to file. Keys must be added in an order that agrees with the + * Comparator passed on construction. + * + * @param cell + * Cell to add. Cannot be empty nor null. + * @throws IOException + */ + @Override + public void append(final Cell cell) throws IOException { + byte[] value = cell.getValueArray(); + int voffset = cell.getValueOffset(); + int vlength = cell.getValueLength(); + // checkKey uses comparator to check we are writing in order. + boolean dupKey = checkKey(cell); + checkValue(value, voffset, vlength); + if (!dupKey) { + checkBlockBoundary(); + } + + if (!fsBlockWriter.isWriting()) { + newBlock(); + } + + fsBlockWriter.write(cell); + + totalKeyLength += CellUtil.estimatedSerializedSizeOfKey(cell); + totalValueLength += vlength; + + // Are we the first key in this block? + if (firstCellInBlock == null) { + // If cell is big, block will be closed and this firstCellInBlock reference will only last + // a short while. + firstCellInBlock = cell; + } + + // TODO: What if cell is 10MB and we write infrequently? We hold on to cell here indefinetly? + lastCell = cell; + entryCount++; + this.maxMemstoreTS = Math.max(this.maxMemstoreTS, cell.getSequenceId()); + int tagsLength = cell.getTagsLength(); + if (tagsLength > this.maxTagsLength) { + this.maxTagsLength = tagsLength; + } + } + + protected void finishFileInfo() throws IOException { + if (lastCell != null) { + // Make a copy. The copy is stuffed into our fileinfo map. Needs a clean + // byte buffer. Won't take a tuple. + byte [] lastKey = CellUtil.getCellKeySerializedAsKeyValueKey(this.lastCell); + fileInfo.append(FileInfo.LASTKEY, lastKey, false); + } + + // Average key length. + int avgKeyLen = + entryCount == 0 ? 0 : (int) (totalKeyLength / entryCount); + fileInfo.append(FileInfo.AVG_KEY_LEN, Bytes.toBytes(avgKeyLen), false); + fileInfo.append(FileInfo.CREATE_TIME_TS, Bytes.toBytes(hFileContext.getFileCreateTime()), + false); + + // Average value length. + int avgValueLen = + entryCount == 0 ? 0 : (int) (totalValueLength / entryCount); + fileInfo.append(FileInfo.AVG_VALUE_LEN, Bytes.toBytes(avgValueLen), false); + if (hFileContext.getDataBlockEncoding() == DataBlockEncoding.PREFIX_TREE) { + // In case of Prefix Tree encoding, we always write tags information into HFiles even if all + // KVs are having no tags. 
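With append() and its ordering check now consolidated in HFileWriterImpl, a short caller-side sketch may help. It goes through the public HFile.getWriterFactory API rather than instantiating the writer directly; the output path, column family, and block size are assumptions made for illustration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class HFileWriterUsageSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    CacheConfig cacheConf = new CacheConfig(conf);
    HFileContext ctx = new HFileContextBuilder()
        .withBlockSize(64 * 1024)
        .withIncludesTags(true) // the v3 writer records MAX_TAGS_LEN when tags are enabled
        .build();
    HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf)
        .withPath(fs, new Path("/tmp/example.hfile")) // hypothetical output path
        .withFileContext(ctx)
        .create();
    try {
      // Cells must be appended in comparator order; checkKey() rejects out-of-order keys.
      for (int i = 0; i < 10; i++) {
        byte[] row = Bytes.toBytes(String.format("row-%04d", i));
        writer.append(new KeyValue(row, Bytes.toBytes("f"), Bytes.toBytes("q"),
            Bytes.toBytes("value-" + i)));
      }
      writer.appendFileInfo(Bytes.toBytes("sketch.note"), Bytes.toBytes("demo"));
    } finally {
      writer.close();
    }
  }
}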
+ fileInfo.append(FileInfo.MAX_TAGS_LEN, Bytes.toBytes(this.maxTagsLength), false); + } else if (hFileContext.isIncludesTags()) { + // When tags are not being written in this file, MAX_TAGS_LEN is excluded + // from the FileInfo + fileInfo.append(FileInfo.MAX_TAGS_LEN, Bytes.toBytes(this.maxTagsLength), false); + boolean tagsCompressed = (hFileContext.getDataBlockEncoding() != DataBlockEncoding.NONE) + && hFileContext.isCompressTags(); + fileInfo.append(FileInfo.TAGS_COMPRESSED, Bytes.toBytes(tagsCompressed), false); + } + } + + protected int getMajorVersion() { + return 3; + } + + protected int getMinorVersion() { + return HFileReaderImpl.MAX_MINOR_VERSION; + } + + protected void finishClose(FixedFileTrailer trailer) throws IOException { + // Write out encryption metadata before finalizing if we have a valid crypto context + Encryption.Context cryptoContext = hFileContext.getEncryptionContext(); + if (cryptoContext != Encryption.Context.NONE) { + // Wrap the context's key and write it as the encryption metadata, the wrapper includes + // all information needed for decryption + trailer.setEncryptionKey(EncryptionUtil.wrapKey(cryptoContext.getConf(), + cryptoContext.getConf().get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, + User.getCurrent().getShortName()), + cryptoContext.getKey())); + } + // Now we can finish the close + trailer.setMetaIndexCount(metaNames.size()); + trailer.setTotalUncompressedBytes(totalUncompressedBytes+ trailer.getTrailerSize()); + trailer.setEntryCount(entryCount); + trailer.setCompressionCodec(hFileContext.getCompression()); + + trailer.serialize(outputStream); + + if (closeOutputStream) { + outputStream.close(); + outputStream = null; + } + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java deleted file mode 100644 index 086395ca6be..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV3.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.io.hfile; - -import java.io.IOException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue.KVComparator; -import org.apache.hadoop.hbase.io.crypto.Encryption; -import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; -import org.apache.hadoop.hbase.io.hfile.HFile.Writer; -import org.apache.hadoop.hbase.security.EncryptionUtil; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.util.Bytes; - -/** - * {@link HFile} writer for version 3. - */ -@InterfaceAudience.Private -public class HFileWriterV3 extends HFileWriterV2 { - - private static final Log LOG = LogFactory.getLog(HFileWriterV3.class); - - private int maxTagsLength = 0; - - static class WriterFactoryV3 extends HFile.WriterFactory { - WriterFactoryV3(Configuration conf, CacheConfig cacheConf) { - super(conf, cacheConf); - } - - @Override - public Writer createWriter(FileSystem fs, Path path, FSDataOutputStream ostream, - final KVComparator comparator, HFileContext fileContext) - throws IOException { - return new HFileWriterV3(conf, cacheConf, fs, path, ostream, comparator, fileContext); - } - } - - /** Constructor that takes a path, creates and closes the output stream. */ - public HFileWriterV3(Configuration conf, CacheConfig cacheConf, FileSystem fs, Path path, - FSDataOutputStream ostream, final KVComparator comparator, - final HFileContext fileContext) throws IOException { - super(conf, cacheConf, fs, path, ostream, comparator, fileContext); - if (LOG.isTraceEnabled()) { - LOG.trace("Writer" + (path != null ? " for " + path : "") + - " initialized with cacheConf: " + cacheConf + - " comparator: " + comparator.getClass().getSimpleName() + - " fileContext: " + fileContext); - } - } - - /** - * Add key/value to file. Keys must be added in an order that agrees with the - * Comparator passed on construction. - * - * @param cell - * Cell to add. Cannot be empty nor null. - * @throws IOException - */ - @Override - public void append(final Cell cell) throws IOException { - // Currently get the complete arrays - super.append(cell); - int tagsLength = cell.getTagsLength(); - if (tagsLength > this.maxTagsLength) { - this.maxTagsLength = tagsLength; - } - } - - protected void finishFileInfo() throws IOException { - super.finishFileInfo(); - if (hFileContext.getDataBlockEncoding() == DataBlockEncoding.PREFIX_TREE) { - // In case of Prefix Tree encoding, we always write tags information into HFiles even if all - // KVs are having no tags. 
- fileInfo.append(FileInfo.MAX_TAGS_LEN, Bytes.toBytes(this.maxTagsLength), false); - } else if (hFileContext.isIncludesTags()) { - // When tags are not being written in this file, MAX_TAGS_LEN is excluded - // from the FileInfo - fileInfo.append(FileInfo.MAX_TAGS_LEN, Bytes.toBytes(this.maxTagsLength), false); - boolean tagsCompressed = (hFileContext.getDataBlockEncoding() != DataBlockEncoding.NONE) - && hFileContext.isCompressTags(); - fileInfo.append(FileInfo.TAGS_COMPRESSED, Bytes.toBytes(tagsCompressed), false); - } - } - - @Override - protected int getMajorVersion() { - return 3; - } - - @Override - protected int getMinorVersion() { - return HFileReaderV3.MAX_MINOR_VERSION; - } - - @Override - protected void finishClose(FixedFileTrailer trailer) throws IOException { - // Write out encryption metadata before finalizing if we have a valid crypto context - Encryption.Context cryptoContext = hFileContext.getEncryptionContext(); - if (cryptoContext != Encryption.Context.NONE) { - // Wrap the context's key and write it as the encryption metadata, the wrapper includes - // all information needed for decryption - trailer.setEncryptionKey(EncryptionUtil.wrapKey(cryptoContext.getConf(), - cryptoContext.getConf().get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, - User.getCurrent().getShortName()), - cryptoContext.getKey())); - } - // Now we can finish the close - super.finishClose(trailer); - } - -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java new file mode 100644 index 00000000000..667e7b4c14b --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/InclusiveCombinedBlockCache.java @@ -0,0 +1,58 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hbase.io.hfile; + +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) +public class InclusiveCombinedBlockCache extends CombinedBlockCache implements BlockCache { + public InclusiveCombinedBlockCache(LruBlockCache l1, BlockCache l2) { + super(l1,l2); + } + + @Override + public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, + boolean repeat, boolean updateCacheMetrics) { + // On all external cache set ups the lru should have the l2 cache set as the victimHandler + // Because of that all requests that miss inside of the lru block cache will be + // tried in the l2 block cache. 
+ return lruCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics); + } + + /** + * + * @param cacheKey The block's cache key. + * @param buf The block contents wrapped in a ByteBuffer. + * @param inMemory Whether block should be treated as in-memory. This parameter is only useful for + * the L1 lru cache. + * @param cacheDataInL1 This is totally ignored. + */ + @Override + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, + final boolean cacheDataInL1) { + // This is the inclusive part of the combined block cache. + // Every block is placed into both block caches. + lruCache.cacheBlock(cacheKey, buf, inMemory, true); + + // This assumes that insertion into the L2 block cache is either async or very fast. + l2Cache.cacheBlock(cacheKey, buf, inMemory, true); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 82df5f77af1..bf46bcf28f6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -197,8 +197,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { private boolean forceInMemory; /** Where to send victims (blocks evicted/missing from the cache) */ - // TODO: Fix it so this is not explicit reference to a particular BlockCache implementation. - private BucketCache victimHandler = null; + private BlockCache victimHandler = null; /** * Default constructor. Specify maximum size and expected average block @@ -419,8 +418,17 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { LruCachedBlock cb = map.get(cacheKey); if (cb == null) { if (!repeat && updateCacheMetrics) stats.miss(caching); - if (victimHandler != null) { - return victimHandler.getBlock(cacheKey, caching, repeat, updateCacheMetrics); + // If there is another block cache then try and read there. + // However if this is a retry ( second time in double checked locking ) + // And it's already a miss then the l2 will also be a miss. + if (victimHandler != null && !repeat) { + Cacheable result = victimHandler.getBlock(cacheKey, caching, repeat, updateCacheMetrics); + + // Promote this to L1. 
+ if (result != null && caching) { + cacheBlock(cacheKey, result, /* inMemory = */ false, /* cacheData = */ true); + } + return result; } return null; } @@ -489,10 +497,14 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { } stats.evicted(block.getCachedTime()); if (evictedByEvictionProcess && victimHandler != null) { - boolean wait = getCurrentSize() < acceptableSize(); - boolean inMemory = block.getPriority() == BlockPriority.MEMORY; - victimHandler.cacheBlockWithWait(block.getCacheKey(), block.getBuffer(), - inMemory, wait); + if (victimHandler instanceof BucketCache) { + boolean wait = getCurrentSize() < acceptableSize(); + boolean inMemory = block.getPriority() == BlockPriority.MEMORY; + ((BucketCache)victimHandler).cacheBlockWithWait(block.getCacheKey(), block.getBuffer(), + inMemory, wait); + } else { + victimHandler.cacheBlock(block.getCacheKey(), block.getBuffer()); + } } return block.heapSize(); } @@ -787,7 +799,10 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { synchronized(this) { try { this.wait(1000 * 10/*Don't wait for ever*/); - } catch(InterruptedException e) {} + } catch(InterruptedException e) { + LOG.warn("Interrupted eviction thread ", e); + Thread.currentThread().interrupt(); + } } LruBlockCache cache = this.cache.get(); if (cache == null) break; @@ -1057,7 +1072,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { return counts; } - public void setVictimCache(BucketCache handler) { + public void setVictimCache(BlockCache handler) { assert victimHandler == null; victimHandler = handler; } @@ -1067,7 +1082,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { return map; } - BucketCache getVictimHandler() { + BlockCache getVictimHandler() { return this.victimHandler; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java new file mode 100644 index 00000000000..57e7f2827ae --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java @@ -0,0 +1,272 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
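Before the new MemcachedBlockCache continues below, the victim-cache lookup that the LruBlockCache hunk above adopts can be summarized: on an L1 miss the request falls through to the victim handler and, when caching is requested and the block is found, it is promoted back into L1. The sketch below is a deliberately simplified stand-in using plain JDK types, not the HBase implementation.

import java.util.HashMap;
import java.util.Map;

public class TieredCacheSketch<K, V> {
  /** Minimal stand-in for a slower, larger victim cache (e.g. a bucket cache or memcached). */
  public interface VictimCache<K, V> {
    V get(K key);
  }

  private final Map<K, V> l1 = new HashMap<K, V>();
  private final VictimCache<K, V> victim;

  public TieredCacheSketch(VictimCache<K, V> victim) {
    this.victim = victim;
  }

  public V getBlock(K key, boolean caching) {
    V hit = l1.get(key);
    if (hit != null) {
      return hit;
    }
    // L1 miss: consult the victim cache, mirroring the new LruBlockCache.getBlock() path.
    V fromVictim = victim.get(key);
    if (fromVictim != null && caching) {
      l1.put(key, fromVictim); // promote hot blocks back into L1
    }
    return fromVictim;
  }
}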
+ */ + +package org.apache.hadoop.hbase.io.hfile; + +import net.spy.memcached.CachedData; +import net.spy.memcached.ConnectionFactoryBuilder; +import net.spy.memcached.FailureMode; +import net.spy.memcached.MemcachedClient; +import net.spy.memcached.transcoders.Transcoder; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.Addressing; +import org.apache.htrace.Trace; +import org.apache.htrace.TraceScope; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.concurrent.ExecutionException; + +/** + * Class to store blocks into memcached. + * This should only be used on a cluster of Memcached daemons that are tuned well and have a + * good network connection to the HBase regionservers. Any other use will likely slow down HBase + * greatly. + */ +@InterfaceAudience.Private +public class MemcachedBlockCache implements BlockCache { + private static final Log LOG = LogFactory.getLog(MemcachedBlockCache.class.getName()); + + // Some memcache versions won't take more than 1024 * 1024. So set the limit below + // that just in case this client is used with those versions. + public static final int MAX_SIZE = 1020 * 1024; + + // Config key for what memcached servers to use. + // They should be specified in a comma sperated list with ports. + // like: + // + // host1:11211,host3:8080,host4:11211 + public static final String MEMCACHED_CONFIG_KEY = "hbase.cache.memcached.servers"; + public static final String MEMCACHED_TIMEOUT_KEY = "hbase.cache.memcached.timeout"; + public static final String MEMCACHED_OPTIMEOUT_KEY = "hbase.cache.memcached.optimeout"; + public static final long MEMCACHED_DEFAULT_TIMEOUT = 500; + + private final MemcachedClient client; + private final HFileBlockTranscoder tc = new HFileBlockTranscoder(); + private final CacheStats cacheStats = new CacheStats("MemcachedBlockCache"); + + public MemcachedBlockCache(Configuration c) throws IOException { + LOG.info("Creating MemcachedBlockCache"); + + long opTimeout = c.getLong(MEMCACHED_OPTIMEOUT_KEY, MEMCACHED_DEFAULT_TIMEOUT); + long queueTimeout = c.getLong(MEMCACHED_TIMEOUT_KEY, opTimeout + MEMCACHED_DEFAULT_TIMEOUT); + + ConnectionFactoryBuilder builder = new ConnectionFactoryBuilder() + .setOpTimeout(opTimeout) + .setOpQueueMaxBlockTime(queueTimeout) // Cap the max time before anything times out + .setFailureMode(FailureMode.Redistribute) + .setShouldOptimize(true) // When regions move lots of reads happen together + // So combining them into single requests is nice. + .setDaemon(true) // Don't keep threads around past the end of days. + .setUseNagleAlgorithm(false) // Ain't nobody got time for that + .setReadBufferSize(HConstants.DEFAULT_BLOCKSIZE * 4 * 1024); // 4 times larger than the + // default block just in case + + + // Assume only the localhost is serving memecached. + // A la mcrouter or co-locating memcached with split regionservers. + // + // If this config is a pool of memecached servers they will all be used according to the + // default hashing scheme defined by the memcache client. Spy Memecache client in this + // case. 
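As the constructor comment above notes, the server list comes from configuration. A small sketch of wiring it up and parsing it the same way follows; the host names and ports are purely hypothetical.

import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.Addressing;

public class MemcachedCacheConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Same keys the new class reads; the hosts below are made up for the example.
    conf.set("hbase.cache.memcached.servers", "cache-1:11211,cache-2:11211");
    conf.setLong("hbase.cache.memcached.timeout", 500L);

    // Equivalent parsing to the constructor above: split the list and build socket addresses.
    String serverList = conf.get("hbase.cache.memcached.servers", "localhost:11211");
    List<InetSocketAddress> addresses = new ArrayList<InetSocketAddress>();
    for (String hostAndPort : serverList.split(",")) {
      addresses.add(Addressing.createInetSocketAddressFromHostAndPortStr(hostAndPort));
    }
    System.out.println("memcached servers: " + addresses);
  }
}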
+ String serverListString = c.get(MEMCACHED_CONFIG_KEY,"localhost:11211"); + String[] servers = serverListString.split(","); + List serverAddresses = new ArrayList(servers.length); + for (String s:servers) { + serverAddresses.add(Addressing.createInetSocketAddressFromHostAndPortStr(s)); + } + + client = new MemcachedClient(builder.build(), serverAddresses); + } + + @Override + public void cacheBlock(BlockCacheKey cacheKey, + Cacheable buf, + boolean inMemory, + boolean cacheDataInL1) { + cacheBlock(cacheKey, buf); + } + + @Override + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) { + if (buf instanceof HFileBlock) { + client.add(cacheKey.toString(), MAX_SIZE, (HFileBlock) buf, tc); + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("MemcachedBlockCache can not cache Cacheable's of type " + + buf.getClass().toString()); + } + } + } + + @Override + public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, + boolean repeat, boolean updateCacheMetrics) { + // Assume that nothing is the block cache + HFileBlock result = null; + + try (TraceScope traceScope = Trace.startSpan("MemcachedBlockCache.getBlock")) { + result = client.get(cacheKey.toString(), tc); + } catch (Exception e) { + // Catch a pretty broad set of exceptions to limit any changes in the memecache client + // and how it handles failures from leaking into the read path. + if (LOG.isDebugEnabled()) { + LOG.debug("Exception pulling from memcached [ " + + cacheKey.toString() + + " ]. Treating as a miss.", e); + } + result = null; + } finally { + // Update stats if this request doesn't have it turned off 100% of the time + if (updateCacheMetrics) { + if (result == null) { + cacheStats.miss(caching); + } else { + cacheStats.hit(caching); + } + } + } + + + return result; + } + + @Override + public boolean evictBlock(BlockCacheKey cacheKey) { + try { + cacheStats.evict(); + return client.delete(cacheKey.toString()).get(); + } catch (InterruptedException e) { + LOG.warn("Error deleting " + cacheKey.toString(), e); + Thread.currentThread().interrupt(); + } catch (ExecutionException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Error deleting " + cacheKey.toString(), e); + } + } + return false; + } + + /** + * This method does nothing so that memcached can handle all evictions. + */ + @Override + public int evictBlocksByHfileName(String hfileName) { + return 0; + } + + @Override + public CacheStats getStats() { + return cacheStats; + } + + @Override + public void shutdown() { + client.shutdown(); + } + + @Override + public long size() { + return 0; + } + + @Override + public long getFreeSize() { + return 0; + } + + @Override + public long getCurrentSize() { + return 0; + } + + @Override + public long getBlockCount() { + return 0; + } + + @Override + public Iterator iterator() { + return new Iterator() { + @Override + public boolean hasNext() { + return false; + } + + @Override + public CachedBlock next() { + throw new NoSuchElementException("MemcachedBlockCache can't iterate over blocks."); + } + + @Override + public void remove() { + + } + }; + } + + @Override + public BlockCache[] getBlockCaches() { + return null; + } + + /** + * Class to encode and decode an HFileBlock to and from memecached's resulting byte arrays. 
+ */ + private static class HFileBlockTranscoder implements Transcoder { + + @Override + public boolean asyncDecode(CachedData d) { + return false; + } + + @Override + public CachedData encode(HFileBlock block) { + ByteBuffer bb = ByteBuffer.allocate(block.getSerializedLength()); + block.serialize(bb); + return new CachedData(0, bb.array(), CachedData.MAX_SIZE); + } + + @Override + public HFileBlock decode(CachedData d) { + try { + ByteBuffer buf = ByteBuffer.wrap(d.getData()); + return (HFileBlock) HFileBlock.blockDeserializer.deserialize(buf, true); + } catch (IOException e) { + LOG.warn("Error deserializing data from memcached",e); + } + return null; + } + + @Override + public int getMaxSize() { + return MAX_SIZE; + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index d3b303adf73..6a5c884e69b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -39,6 +39,7 @@ import java.util.Set; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -109,13 +110,14 @@ public class BucketCache implements BlockCache, HeapSize { final static int DEFAULT_WRITER_QUEUE_ITEMS = 64; // Store/read block data - IOEngine ioEngine; + final IOEngine ioEngine; // Store the block in this map before writing it to cache @VisibleForTesting - Map ramCache; + final ConcurrentMap ramCache; // In this map, store the block's meta data like offset, length - private Map backingMap; + @VisibleForTesting + ConcurrentMap backingMap; /** * Flag if the cache is enabled or not... We shut it off if there are IO @@ -132,14 +134,14 @@ public class BucketCache implements BlockCache, HeapSize { * to the BucketCache. It then updates the ramCache and backingMap accordingly. */ @VisibleForTesting - ArrayList> writerQueues = + final ArrayList> writerQueues = new ArrayList>(); @VisibleForTesting - WriterThread writerThreads[]; + final WriterThread[] writerThreads; /** Volatile boolean to track if free space is in process or not */ private volatile boolean freeInProgress = false; - private Lock freeSpaceLock = new ReentrantLock(); + private final Lock freeSpaceLock = new ReentrantLock(); private UniqueIndexMap deserialiserMap = new UniqueIndexMap(); @@ -152,17 +154,16 @@ public class BucketCache implements BlockCache, HeapSize { /** Cache access count (sequential ID) */ private final AtomicLong accessCount = new AtomicLong(0); - private final Object[] cacheWaitSignals; private static final int DEFAULT_CACHE_WAIT_TIME = 50; // Used in test now. If the flag is false and the cache speed is very fast, // bucket cache will skip some blocks when caching. 
If the flag is true, we // will wait blocks flushed to IOEngine for some time when caching boolean wait_when_cache = false; - private BucketCacheStats cacheStats = new BucketCacheStats(); + private final BucketCacheStats cacheStats = new BucketCacheStats(); - private String persistencePath; - private long cacheCapacity; + private final String persistencePath; + private final long cacheCapacity; /** Approximate block size */ private final long blockSize; @@ -182,7 +183,8 @@ public class BucketCache implements BlockCache, HeapSize { * * TODO:We could extend the IdLock to IdReadWriteLock for better. */ - private IdLock offsetLock = new IdLock(); + @VisibleForTesting + final IdLock offsetLock = new IdLock(); private final ConcurrentIndex blocksByHFile = new ConcurrentIndex(new Comparator() { @@ -216,7 +218,6 @@ public class BucketCache implements BlockCache, HeapSize { throws FileNotFoundException, IOException { this.ioEngine = getIOEngineFromName(ioEngineName, capacity); this.writerThreads = new WriterThread[writerThreadNum]; - this.cacheWaitSignals = new Object[writerThreadNum]; long blockNumCapacity = capacity / blockSize; if (blockNumCapacity >= Integer.MAX_VALUE) { // Enough for about 32TB of cache! @@ -231,7 +232,6 @@ public class BucketCache implements BlockCache, HeapSize { bucketAllocator = new BucketAllocator(capacity, bucketSizes); for (int i = 0; i < writerThreads.length; ++i) { writerQueues.add(new ArrayBlockingQueue(writerQLen)); - this.cacheWaitSignals[i] = new Object(); } assert writerQueues.size() == writerThreads.length; @@ -252,7 +252,7 @@ public class BucketCache implements BlockCache, HeapSize { final String threadName = Thread.currentThread().getName(); this.cacheEnabled = true; for (int i = 0; i < writerThreads.length; ++i) { - writerThreads[i] = new WriterThread(writerQueues.get(i), i); + writerThreads[i] = new WriterThread(writerQueues.get(i)); writerThreads[i].setName(threadName + "-BucketCacheWriter-" + i); writerThreads[i].setDaemon(true); } @@ -344,38 +344,39 @@ public class BucketCache implements BlockCache, HeapSize { * @param inMemory if block is in-memory * @param wait if true, blocking wait when queue is full */ - public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, - boolean inMemory, boolean wait) { - if (!cacheEnabled) + public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory, + boolean wait) { + if (!cacheEnabled) { return; + } - if (backingMap.containsKey(cacheKey) || ramCache.containsKey(cacheKey)) + if (backingMap.containsKey(cacheKey)) { return; + } /* - * Stuff the entry into the RAM cache so it can get drained to the - * persistent store + * Stuff the entry into the RAM cache so it can get drained to the persistent store */ - RAMQueueEntry re = new RAMQueueEntry(cacheKey, cachedItem, - accessCount.incrementAndGet(), inMemory); - ramCache.put(cacheKey, re); + RAMQueueEntry re = + new RAMQueueEntry(cacheKey, cachedItem, accessCount.incrementAndGet(), inMemory); + if (ramCache.putIfAbsent(cacheKey, re) != null) { + return; + } int queueNum = (cacheKey.hashCode() & 0x7FFFFFFF) % writerQueues.size(); BlockingQueue bq = writerQueues.get(queueNum); - boolean successfulAddition = bq.offer(re); - if (!successfulAddition && wait) { - synchronized (cacheWaitSignals[queueNum]) { - try { - successfulAddition = bq.offer(re); - if (!successfulAddition) cacheWaitSignals[queueNum].wait(DEFAULT_CACHE_WAIT_TIME); - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); - } + boolean 
successfulAddition = false; + if (wait) { + try { + successfulAddition = bq.offer(re, DEFAULT_CACHE_WAIT_TIME, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); } + } else { successfulAddition = bq.offer(re); } if (!successfulAddition) { - ramCache.remove(cacheKey); - failedBlockAdditions.incrementAndGet(); + ramCache.remove(cacheKey); + failedBlockAdditions.incrementAndGet(); } else { this.blockNumber.incrementAndGet(); this.heapSize.addAndGet(cachedItem.heapSize()); @@ -394,11 +395,14 @@ public class BucketCache implements BlockCache, HeapSize { @Override public Cacheable getBlock(BlockCacheKey key, boolean caching, boolean repeat, boolean updateCacheMetrics) { - if (!cacheEnabled) + if (!cacheEnabled) { return null; + } RAMQueueEntry re = ramCache.get(key); if (re != null) { - if (updateCacheMetrics) cacheStats.hit(caching); + if (updateCacheMetrics) { + cacheStats.hit(caching); + } re.access(accessCount.incrementAndGet()); return re.getData(); } @@ -408,6 +412,9 @@ public class BucketCache implements BlockCache, HeapSize { IdLock.Entry lockEntry = null; try { lockEntry = offsetLock.getLockEntry(bucketEntry.offset()); + // We can not read here even if backingMap does contain the given key because its offset + // maybe changed. If we lock BlockCacheKey instead of offset, then we can only check + // existence here. if (bucketEntry.equals(backingMap.get(key))) { int len = bucketEntry.getLength(); ByteBuffer bb = ByteBuffer.allocate(len); @@ -438,43 +445,58 @@ public class BucketCache implements BlockCache, HeapSize { } } } - if (!repeat && updateCacheMetrics) cacheStats.miss(caching); + if (!repeat && updateCacheMetrics) { + cacheStats.miss(caching); + } return null; } + @VisibleForTesting + void blockEvicted(BlockCacheKey cacheKey, BucketEntry bucketEntry, boolean decrementBlockNumber) { + bucketAllocator.freeBlock(bucketEntry.offset()); + realCacheSize.addAndGet(-1 * bucketEntry.getLength()); + blocksByHFile.remove(cacheKey.getHfileName(), cacheKey); + if (decrementBlockNumber) { + this.blockNumber.decrementAndGet(); + } + } + @Override public boolean evictBlock(BlockCacheKey cacheKey) { - if (!cacheEnabled) return false; + if (!cacheEnabled) { + return false; + } RAMQueueEntry removedBlock = ramCache.remove(cacheKey); if (removedBlock != null) { this.blockNumber.decrementAndGet(); this.heapSize.addAndGet(-1 * removedBlock.getData().heapSize()); } BucketEntry bucketEntry = backingMap.get(cacheKey); - if (bucketEntry != null) { - IdLock.Entry lockEntry = null; - try { - lockEntry = offsetLock.getLockEntry(bucketEntry.offset()); - if (bucketEntry.equals(backingMap.remove(cacheKey))) { - bucketAllocator.freeBlock(bucketEntry.offset()); - realCacheSize.addAndGet(-1 * bucketEntry.getLength()); - blocksByHFile.remove(cacheKey.getHfileName(), cacheKey); - if (removedBlock == null) { - this.blockNumber.decrementAndGet(); - } - } else { - return false; - } - } catch (IOException ie) { - LOG.warn("Failed evicting block " + cacheKey); + if (bucketEntry == null) { + if (removedBlock != null) { + cacheStats.evicted(0); + return true; + } else { return false; - } finally { - if (lockEntry != null) { - offsetLock.releaseLockEntry(lockEntry); - } } } - cacheStats.evicted(bucketEntry == null? 
0: bucketEntry.getCachedTime()); + IdLock.Entry lockEntry = null; + try { + lockEntry = offsetLock.getLockEntry(bucketEntry.offset()); + if (backingMap.remove(cacheKey, bucketEntry)) { + blockEvicted(cacheKey, bucketEntry, removedBlock == null); + } else { + return false; + } + } catch (IOException ie) { + LOG.warn("Failed evicting block " + cacheKey); + return false; + } finally { + if (lockEntry != null) { + offsetLock.releaseLockEntry(lockEntry); + } + } + cacheStats.evicted(bucketEntry.getCachedTime()); return true; } @@ -699,13 +721,10 @@ public class BucketCache implements BlockCache, HeapSize { @VisibleForTesting class WriterThread extends HasThread { private final BlockingQueue inputQueue; - private final int threadNO; private volatile boolean writerEnabled = true; - WriterThread(BlockingQueue queue, int threadNO) { - super(); + WriterThread(BlockingQueue queue) { this.inputQueue = queue; - this.threadNO = threadNO; } // Used for test @@ -722,9 +741,6 @@ public class BucketCache implements BlockCache, HeapSize { try { // Blocks entries = getRAMQueueEntries(inputQueue, entries); - synchronized (cacheWaitSignals[threadNO]) { - cacheWaitSignals[threadNO].notifyAll(); - } } catch (InterruptedException ie) { if (!cacheEnabled) break; } @@ -749,7 +765,9 @@ public class BucketCache implements BlockCache, HeapSize { */ @VisibleForTesting void doDrain(final List entries) throws InterruptedException { - if (entries.isEmpty()) return; + if (entries.isEmpty()) { + return; + } // This method is a little hard to follow. We run through the passed in entries and for each // successful add, we add a non-null BucketEntry to the below bucketEntries. Later we must // do cleanup making sure we've cleared ramCache of all entries regardless of whether we @@ -824,6 +842,21 @@ public class BucketCache implements BlockCache, HeapSize { RAMQueueEntry ramCacheEntry = ramCache.remove(key); if (ramCacheEntry != null) { heapSize.addAndGet(-1 * entries.get(i).getData().heapSize()); + } else if (bucketEntries[i] != null){ + // Block should have already been evicted. Remove it and free space. + IdLock.Entry lockEntry = null; + try { + lockEntry = offsetLock.getLockEntry(bucketEntries[i].offset()); + if (backingMap.remove(key, bucketEntries[i])) { + blockEvicted(key, bucketEntries[i], false); + } + } catch (IOException e) { + LOG.warn("failed to free space for " + key, e); + } finally { + if (lockEntry != null) { + offsetLock.releaseLockEntry(lockEntry); + } + } } } @@ -1049,23 +1082,35 @@ public class BucketCache implements BlockCache, HeapSize { * up the long. Doubt we'll see devices this big for ages. Offsets are divided * by 256. So 5 bytes gives us 256TB or so. */ - static class BucketEntry implements Serializable, Comparable { + static class BucketEntry implements Serializable { private static final long serialVersionUID = -6741504807982257534L; + + // access counter comparator, descending order + static final Comparator COMPARATOR = new Comparator() { + + @Override + public int compare(BucketEntry o1, BucketEntry o2) { + long accessCounter1 = o1.accessCounter; + long accessCounter2 = o2.accessCounter; + return accessCounter1 < accessCounter2 ? 1 : accessCounter1 == accessCounter2 ? 0 : -1; + } + }; + private int offsetBase; private int length; private byte offset1; byte deserialiserIndex; - private volatile long accessTime; + private volatile long accessCounter; private BlockPriority priority; /** * Time this block was cached. Presumes we are created just before we are added to the cache. 
*/ private final long cachedTime = System.nanoTime(); - BucketEntry(long offset, int length, long accessTime, boolean inMemory) { + BucketEntry(long offset, int length, long accessCounter, boolean inMemory) { setOffset(offset); this.length = length; - this.accessTime = accessTime; + this.accessCounter = accessCounter; if (inMemory) { this.priority = BlockPriority.MEMORY; } else { @@ -1104,10 +1149,10 @@ public class BucketCache implements BlockCache, HeapSize { } /** - * Block has been accessed. Update its local access time. + * Block has been accessed. Update its local access counter. */ - public void access(long accessTime) { - this.accessTime = accessTime; + public void access(long accessCounter) { + this.accessCounter = accessCounter; if (this.priority == BlockPriority.SINGLE) { this.priority = BlockPriority.MULTI; } @@ -1117,17 +1162,6 @@ public class BucketCache implements BlockCache, HeapSize { return this.priority; } - @Override - public int compareTo(BucketEntry that) { - if(this.accessTime == that.accessTime) return 0; - return this.accessTime < that.accessTime ? 1 : -1; - } - - @Override - public boolean equals(Object that) { - return this == that; - } - public long getCachedTime() { return cachedTime; } @@ -1198,14 +1232,14 @@ public class BucketCache implements BlockCache, HeapSize { static class RAMQueueEntry { private BlockCacheKey key; private Cacheable data; - private long accessTime; + private long accessCounter; private boolean inMemory; - public RAMQueueEntry(BlockCacheKey bck, Cacheable data, long accessTime, + public RAMQueueEntry(BlockCacheKey bck, Cacheable data, long accessCounter, boolean inMemory) { this.key = bck; this.data = data; - this.accessTime = accessTime; + this.accessCounter = accessCounter; this.inMemory = inMemory; } @@ -1217,8 +1251,8 @@ public class BucketCache implements BlockCache, HeapSize { return key; } - public void access(long accessTime) { - this.accessTime = accessTime; + public void access(long accessCounter) { + this.accessCounter = accessCounter; } public BucketEntry writeToCache(final IOEngine ioEngine, @@ -1230,7 +1264,7 @@ public class BucketCache implements BlockCache, HeapSize { // This cacheable thing can't be serialized... 
if (len == 0) return null; long offset = bucketAllocator.allocateBlock(len); - BucketEntry bucketEntry = new BucketEntry(offset, len, accessTime, inMemory); + BucketEntry bucketEntry = new BucketEntry(offset, len, accessCounter, inMemory); bucketEntry.setDeserialiserReference(data.getDeserializer(), deserialiserMap); try { if (data instanceof HFileBlock) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java index b6954bb77c2..0e33a569f52 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/CachedEntryQueue.java @@ -54,23 +54,23 @@ public class CachedEntryQueue { */ public CachedEntryQueue(long maxSize, long blockSize) { int initialSize = (int) (maxSize / blockSize); - if (initialSize == 0) + if (initialSize == 0) { initialSize++; - queue = MinMaxPriorityQueue - .orderedBy(new Comparator>() { - public int compare(Entry entry1, - Entry entry2) { - return entry1.getValue().compareTo(entry2.getValue()); - } + } + queue = MinMaxPriorityQueue.orderedBy(new Comparator>() { - }).expectedSize(initialSize).create(); + public int compare(Entry entry1, + Entry entry2) { + return BucketEntry.COMPARATOR.compare(entry1.getValue(), entry2.getValue()); + } + + }).expectedSize(initialSize).create(); cacheSize = 0; this.maxSize = maxSize; } /** * Attempt to add the specified entry to this queue. - * *

      * If the queue is smaller than the max size, or if the specified element is * ordered after the smallest element in the queue, the element will be added @@ -83,7 +83,7 @@ public class CachedEntryQueue { cacheSize += entry.getValue().getLength(); } else { BucketEntry head = queue.peek().getValue(); - if (entry.getValue().compareTo(head) > 0) { + if (BucketEntry.COMPARATOR.compare(entry.getValue(), head) > 0) { cacheSize += entry.getValue().getLength(); cacheSize -= head.getLength(); if (cacheSize > maxSize) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java index ef6fa88a8d1..e2274e9935b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.ipc.RpcServer.Call; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.monitoring.TaskMonitor; -import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; @@ -35,14 +34,13 @@ import com.google.protobuf.Message; /** * The request processing logic, which is usually executed in thread pools provided by an * {@link RpcScheduler}. Call {@link #run()} to actually execute the contained - * {@link RpcServer.Call} + * RpcServer.Call */ @InterfaceAudience.Private public class CallRunner { private Call call; private RpcServerInterface rpcServer; private MonitoredRPCHandler status; - private UserProvider userProvider; /** * On construction, adds the size of this call to the running count of outstanding call sizes. @@ -50,13 +48,12 @@ public class CallRunner { * time we occupy heap. */ // The constructor is shutdown so only RpcServer in this class can make one of these. - CallRunner(final RpcServerInterface rpcServer, final Call call, UserProvider userProvider) { + CallRunner(final RpcServerInterface rpcServer, final Call call) { this.call = call; this.rpcServer = rpcServer; // Add size of the call to queue size. this.rpcServer.addCallSize(call.getSize()); this.status = getStatus(); - this.userProvider = userProvider; } public Call getCall() { @@ -70,7 +67,6 @@ public class CallRunner { this.call = null; this.rpcServer = null; this.status = null; - this.userProvider = null; } public void run() { @@ -101,8 +97,6 @@ public class CallRunner { if (call.tinfo != null) { traceScope = Trace.startSpan(call.toTraceString(), call.tinfo); } - RequestContext.set(userProvider.create(call.connection.user), RpcServer.getRemoteIp(), - call.connection.service); // make the call resultPair = this.rpcServer.call(call.service, call.md, call.param, call.cellScanner, call.timestamp, this.status); @@ -117,11 +111,8 @@ public class CallRunner { if (traceScope != null) { traceScope.close(); } - // Must always clear the request context to avoid leaking - // credentials between requests. - RequestContext.clear(); + RpcServer.CurCall.set(null); } - RpcServer.CurCall.set(null); // Set the response for undelayed calls and delayed calls with // undelayed responses. 
if (!call.isDelayed() || !call.isReturnValueDelayed()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java index 8ca8659a890..825e6887e2c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServer.java @@ -63,6 +63,10 @@ public class MetricsHBaseServer { source.processedCall(processingTime); } + void totalCall(int totalTime) { + source.queuedAndProcessedCall(totalTime); + } + public MetricsHBaseServerSource getMetricsSource() { return source; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RequestContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RequestContext.java deleted file mode 100644 index a395f2ee84d..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RequestContext.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright The Apache Software Foundation - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hbase.ipc; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.security.User; - -import com.google.protobuf.BlockingService; - -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.htrace.Trace; - -import java.net.InetAddress; - -/** - * Represents client information (authenticated username, remote address, protocol) - * for the currently executing request. If called outside the context of a RPC request, all values - * will be null. The {@link CallRunner} class before it a call and then on - * its way out, it will clear the thread local. - */ -@InterfaceAudience.Private -public class RequestContext { - private static ThreadLocal instance = - new ThreadLocal() { - protected RequestContext initialValue() { - return new RequestContext(null, null, null); - } - }; - - public static RequestContext get() { - return instance.get(); - } - - - /** - * Returns the user credentials associated with the current RPC request or - * null if no credentials were provided. - * @return A User - */ - public static User getRequestUser() { - RequestContext ctx = instance.get(); - if (ctx != null) { - return ctx.getUser(); - } - return null; - } - - /** - * Returns the username for any user associated with the current RPC - * request or null if no user is set. - */ - public static String getRequestUserName() { - User user = getRequestUser(); - if (user != null) { - return user.getShortName(); - } - return null; - } - - /** - * Indicates whether or not the current thread is within scope of executing - * an RPC request. 
- */ - public static boolean isInRequestContext() { - RequestContext ctx = instance.get(); - if (ctx != null) { - return ctx.isInRequest(); - } - return false; - } - - /** - * Initializes the client credentials for the current request. - * @param user - * @param remoteAddress - * @param service - */ - public static void set(User user, - InetAddress remoteAddress, BlockingService service) { - RequestContext ctx = instance.get(); - ctx.user = user; - ctx.remoteAddress = remoteAddress; - ctx.service = service; - ctx.inRequest = true; - if (Trace.isTracing()) { - if (user != null) { - Trace.currentSpan().addKVAnnotation(Bytes.toBytes("user"), Bytes.toBytes(user.getName())); - } - if (remoteAddress != null) { - Trace.currentSpan().addKVAnnotation( - Bytes.toBytes("remoteAddress"), - Bytes.toBytes(remoteAddress.getHostAddress())); - } - } - } - - /** - * Clears out the client credentials for a given request. - */ - public static void clear() { - RequestContext ctx = instance.get(); - ctx.user = null; - ctx.remoteAddress = null; - ctx.service = null; - ctx.inRequest = false; - } - - private User user; - private InetAddress remoteAddress; - private BlockingService service; - // indicates we're within a RPC request invocation - private boolean inRequest; - - private RequestContext(User user, InetAddress remoteAddr, BlockingService service) { - this.user = user; - this.remoteAddress = remoteAddr; - this.service = service; - } - - public User getUser() { - return user; - } - - public InetAddress getRemoteAddress() { - return remoteAddress; - } - - public BlockingService getService() { - return this.service; - } - - boolean isInRequest() { - return inRequest; - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java index fd16346183e..0da16a7212f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java @@ -17,6 +17,11 @@ */ package org.apache.hadoop.hbase.ipc; +import java.net.InetAddress; + +import org.apache.hadoop.hbase.security.User; + +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo; public interface RpcCallContext extends Delayable { /** @@ -36,4 +41,26 @@ public interface RpcCallContext extends Delayable { * @return True if the client supports cellblocks, else return all content in pb */ boolean isClientCellBlockSupport(); + + /** + * Returns the user credentials associated with the current RPC request or + * null if no credentials were provided. + * @return A User + */ + User getRequestUser(); + + /** + * @return Current request's user name or null if none ongoing. 
+ */ + String getRequestUserName(); + + /** + * @return Address of remote client if a request is ongoing, else null + */ + InetAddress getRemoteAddress(); + + /** + * @return the client version info, or null if the information is not present + */ + VersionInfo getClientVersionInfo(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index 064771c52fd..c69a1876fb3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -82,6 +82,7 @@ import org.apache.hadoop.hbase.client.Operation; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.exceptions.RegionMovedException; import org.apache.hadoop.hbase.io.ByteBufferOutputStream; +import org.apache.hadoop.hbase.io.BoundedByteBufferPool; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.CellBlockMeta; @@ -90,11 +91,13 @@ import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ExceptionResponse; import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader; import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ResponseHeader; import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.AuthMethod; import org.apache.hadoop.hbase.security.HBasePolicyProvider; import org.apache.hadoop.hbase.security.HBaseSaslRpcServer; +import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslDigestCallbackHandler; import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler; import org.apache.hadoop.hbase.security.SaslStatus; @@ -267,6 +270,9 @@ public class RpcServer implements RpcServerInterface { private UserProvider userProvider; + private final BoundedByteBufferPool reservoir; + + /** * Datastructure that holds all necessary to a method invocation and then afterward, carries * the result. @@ -293,10 +299,14 @@ public class RpcServer implements RpcServerInterface { protected long size; // size of current call protected boolean isError; protected TraceInfo tinfo; + private ByteBuffer cellBlock = null; + + private User user; + private InetAddress remoteAddress; Call(int id, final BlockingService service, final MethodDescriptor md, RequestHeader header, Message param, CellScanner cellScanner, Connection connection, Responder responder, - long size, TraceInfo tinfo) { + long size, TraceInfo tinfo, final InetAddress remoteAddress) { this.id = id; this.service = service; this.md = md; @@ -311,6 +321,21 @@ public class RpcServer implements RpcServerInterface { this.isError = false; this.size = size; this.tinfo = tinfo; + this.user = connection.user == null? null: userProvider.create(connection.user); + this.remoteAddress = remoteAddress; + } + + /** + * Call is done. Execution happened and we returned results to client. It is now safe to + * cleanup. + */ + void done() { + if (this.cellBlock != null) { + // Return buffer to reservoir now we are done with it. 
+ reservoir.putBuffer(this.cellBlock); + this.cellBlock = null; + } + this.connection.decRpcCount(); // Say that we're done with this call. } @Override @@ -375,12 +400,15 @@ public class RpcServer implements RpcServerInterface { // Set the exception as the result of the method invocation. headerBuilder.setException(exceptionBuilder.build()); } - ByteBuffer cellBlock = - ipcUtil.buildCellBlock(this.connection.codec, this.connection.compressionCodec, cells); - if (cellBlock != null) { + // Pass reservoir to buildCellBlock. Keep reference to returne so can add it back to the + // reservoir when finished. This is hacky and the hack is not contained but benefits are + // high when we can avoid a big buffer allocation on each rpc. + this.cellBlock = ipcUtil.buildCellBlock(this.connection.codec, + this.connection.compressionCodec, cells, reservoir); + if (this.cellBlock != null) { CellBlockMeta.Builder cellBlockBuilder = CellBlockMeta.newBuilder(); // Presumes the cellBlock bytebuffer has been flipped so limit has total size in it. - cellBlockBuilder.setLength(cellBlock.limit()); + cellBlockBuilder.setLength(this.cellBlock.limit()); headerBuilder.setCellBlockMeta(cellBlockBuilder.build()); } Message header = headerBuilder.build(); @@ -390,9 +418,9 @@ public class RpcServer implements RpcServerInterface { ByteBuffer bbHeader = IPCUtil.getDelimitedMessageAsByteBuffer(header); ByteBuffer bbResult = IPCUtil.getDelimitedMessageAsByteBuffer(result); int totalSize = bbHeader.capacity() + (bbResult == null? 0: bbResult.limit()) + - (cellBlock == null? 0: cellBlock.limit()); + (this.cellBlock == null? 0: this.cellBlock.limit()); ByteBuffer bbTotalSize = ByteBuffer.wrap(Bytes.toBytes(totalSize)); - bc = new BufferChain(bbTotalSize, bbHeader, bbResult, cellBlock); + bc = new BufferChain(bbTotalSize, bbHeader, bbResult, this.cellBlock); if (connection.useWrap) { bc = wrapWithSasl(bc); } @@ -501,6 +529,27 @@ public class RpcServer implements RpcServerInterface { public UserGroupInformation getRemoteUser() { return connection.user; } + + @Override + public User getRequestUser() { + return user; + } + + @Override + public String getRequestUserName() { + User user = getRequestUser(); + return user == null? null: user.getShortName(); + } + + @Override + public InetAddress getRemoteAddress() { + return remoteAddress; + } + + @Override + public VersionInfo getClientVersionInfo() { + return connection.getVersionInfo(); + } } /** Listens on the socket. Creates jobs for the handler threads*/ @@ -1051,7 +1100,7 @@ public class RpcServer implements RpcServerInterface { } if (!call.response.hasRemaining()) { - call.connection.decRpcCount(); // Say that we're done with this call. + call.done(); return true; } else { return false; // Socket can't take more, we will have to come back. 
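The Call changes above thread a shared buffer reservoir through the response path: setResponse() now builds the cell block out of a pooled ByteBuffer, and done() hands that buffer back once the response has been flushed, so a large allocation is no longer paid on every RPC. As a rough illustration of that borrow-and-return pattern only (a minimal sketch, not the patch's BoundedByteBufferPool, whose bounds come from the hbase.ipc.server.reservoir.* settings shown further down in this diff), a bounded pool could look like the following; the class and method names here are invented for the sketch.

import java.nio.ByteBuffer;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;

/** Minimal, illustrative bounded pool of equally sized ByteBuffers (not the real reservoir). */
public final class SimpleByteBufferPool {
  private final int bufferSize;   // size of every buffer handed out
  private final int maxPooled;    // rough cap on buffers retained for reuse
  private final ConcurrentLinkedQueue<ByteBuffer> pool = new ConcurrentLinkedQueue<ByteBuffer>();
  private final AtomicInteger pooled = new AtomicInteger();

  public SimpleByteBufferPool(int bufferSize, int maxPooled) {
    this.bufferSize = bufferSize;
    this.maxPooled = maxPooled;
  }

  /** Borrow a cleared buffer, allocating a fresh one when the pool is empty. */
  public ByteBuffer getBuffer() {
    ByteBuffer bb = pool.poll();
    if (bb != null) {
      pooled.decrementAndGet();
      bb.clear();
      return bb;
    }
    return ByteBuffer.allocate(bufferSize);
  }

  /** Return a buffer after the response is written; drop it if the (approximate) cap is hit. */
  public void putBuffer(ByteBuffer bb) {
    if (bb.capacity() == bufferSize && pooled.get() < maxPooled) {
      pool.offer(bb);
      pooled.incrementAndGet();
    } // otherwise let the buffer be garbage collected so the pool stays bounded
  }
}

A caller would borrow with getBuffer() before serializing a response and return it from the equivalent of Call.done(); the cap keeps a burst of large responses from growing the retained memory without bound.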
@@ -1175,13 +1224,14 @@ public class RpcServer implements RpcServerInterface { // Fake 'call' for failed authorization response private static final int AUTHORIZATION_FAILED_CALLID = -1; private final Call authFailedCall = - new Call(AUTHORIZATION_FAILED_CALLID, null, null, null, null, null, this, null, 0, null); + new Call(AUTHORIZATION_FAILED_CALLID, null, null, null, null, null, this, null, 0, null, + null); private ByteArrayOutputStream authFailedResponse = new ByteArrayOutputStream(); // Fake 'call' for SASL context setup private static final int SASL_CALLID = -33; private final Call saslCall = - new Call(SASL_CALLID, this.service, null, null, null, null, this, null, 0, null); + new Call(SASL_CALLID, this.service, null, null, null, null, this, null, 0, null, null); public UserGroupInformation attemptingUser = null; // user name before auth @@ -1229,6 +1279,13 @@ public class RpcServer implements RpcServerInterface { this.lastContact = lastContact; } + public VersionInfo getVersionInfo() { + if (connectionHeader.hasVersionInfo()) { + return connectionHeader.getVersionInfo(); + } + return null; + } + /* Return true if the connection has no outstanding rpc */ private boolean isIdle() { return rpcCount.get() == 0; @@ -1485,7 +1542,7 @@ public class RpcServer implements RpcServerInterface { // Else it will be length of the data to read (or -1 if a ping). We catch the integer // length into the 4-byte this.dataLengthBuffer. int count = read4Bytes(); - if (count < 0 || dataLengthBuffer.remaining() > 0 ){ + if (count < 0 || dataLengthBuffer.remaining() > 0 ) { return count; } @@ -1569,7 +1626,7 @@ public class RpcServer implements RpcServerInterface { private int doBadPreambleHandling(final String msg, final Exception e) throws IOException { LOG.warn(msg); - Call fakeCall = new Call(-1, null, null, null, null, null, this, responder, -1, null); + Call fakeCall = new Call(-1, null, null, null, null, null, this, responder, -1, null, null); setupResponse(null, fakeCall, e, msg); responder.doRespond(fakeCall); // Returning -1 closes out the connection. @@ -1615,6 +1672,14 @@ public class RpcServer implements RpcServerInterface { } } } + if (connectionHeader.hasVersionInfo()) { + AUDITLOG.info("Connection from " + this.hostAddress + " port: " + this.remotePort + + " with version info: " + + TextFormat.shortDebugString(connectionHeader.getVersionInfo())); + } else { + AUDITLOG.info("Connection from " + this.hostAddress + " port: " + this.remotePort + + " with unknown version info"); + } } /** @@ -1720,7 +1785,7 @@ public class RpcServer implements RpcServerInterface { if ((totalRequestSize + callQueueSize.get()) > maxQueueSize) { final Call callTooBig = new Call(id, this.service, null, null, null, null, this, - responder, totalRequestSize, null); + responder, totalRequestSize, null, null); ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream(); setupResponse(responseBuffer, callTooBig, new CallQueueTooBigException(), "Call queue is full on " + getListenerAddress() + @@ -1765,7 +1830,7 @@ public class RpcServer implements RpcServerInterface { final Call readParamsFailedCall = new Call(id, this.service, null, null, null, null, this, - responder, totalRequestSize, null); + responder, totalRequestSize, null, null); ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream(); setupResponse(responseBuffer, readParamsFailedCall, t, msg + "; " + t.getMessage()); @@ -1777,9 +1842,8 @@ public class RpcServer implements RpcServerInterface { ? 
new TraceInfo(header.getTraceInfo().getTraceId(), header.getTraceInfo().getParentId()) : null; Call call = new Call(id, this.service, md, header, param, cellScanner, this, responder, - totalRequestSize, - traceInfo); - scheduler.dispatch(new CallRunner(RpcServer.this, call, userProvider)); + totalRequestSize, traceInfo, RpcServer.getRemoteIp()); + scheduler.dispatch(new CallRunner(RpcServer.this, call)); } private boolean authorizeConnection() throws IOException { @@ -1885,7 +1949,13 @@ public class RpcServer implements RpcServerInterface { final InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler) throws IOException { - + this.reservoir = new BoundedByteBufferPool( + conf.getInt("hbase.ipc.server.reservoir.max.buffer.size", 1024 * 1024), + conf.getInt("hbase.ipc.server.reservoir.initial.buffer.size", 16 * 1024), + // Make the max twice the number of handlers to be safe. + conf.getInt("hbase.ipc.server.reservoir.initial.max", + conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT, + HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT) * 2)); this.server = server; this.services = services; this.bindAddress = bindAddress; @@ -2036,16 +2106,20 @@ public class RpcServer implements RpcServerInterface { long startTime = System.currentTimeMillis(); PayloadCarryingRpcController controller = new PayloadCarryingRpcController(cellScanner); Message result = service.callBlockingMethod(md, controller, param); - int processingTime = (int) (System.currentTimeMillis() - startTime); + long endTime = System.currentTimeMillis(); + int processingTime = (int) (endTime - startTime); int qTime = (int) (startTime - receiveTime); + int totalTime = (int) (endTime - receiveTime); if (LOG.isTraceEnabled()) { LOG.trace(CurCall.get().toString() + ", response " + TextFormat.shortDebugString(result) + " queueTime: " + qTime + - " processingTime: " + processingTime); + " processingTime: " + processingTime + + " totalTime: " + totalTime); } metrics.dequeuedCall(qTime); metrics.processedCall(processingTime); + metrics.totalCall(totalTime); long responseSize = result.getSerializedSize(); // log any RPC responses that are slower than the configured warn // response time or larger than configured warning size @@ -2306,6 +2380,33 @@ public class RpcServer implements RpcServerInterface { return CurCall.get(); } + /** + * Returns the user credentials associated with the current RPC request or + * null if no credentials were provided. + * @return A User + */ + public static User getRequestUser() { + RpcCallContext ctx = getCurrentCall(); + return ctx == null? null: ctx.getRequestUser(); + } + + /** + * Returns the username for any user associated with the current RPC + * request or null if no user is set. + */ + public static String getRequestUserName() { + User user = getRequestUser(); + return user == null? null: user.getShortName(); + } + + /** + * @return Address of remote client if a request is ongoing, else null + */ + public static InetAddress getRemoteAddress() { + RpcCallContext ctx = getCurrentCall(); + return ctx == null? null: ctx.getRemoteAddress(); + } + /** * @param serviceName Some arbitrary string that represents a 'service'. 
* @param services Available service instances diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java index 368510fd78a..814daeaa1c7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapred.FileInputFormat; import org.apache.hadoop.mapred.JobConf; @@ -50,6 +49,15 @@ public class TableInputFormat extends TableInputFormatBase implements public static final String COLUMN_LIST = "hbase.mapred.tablecolumns"; public void configure(JobConf job) { + try { + initialize(job); + } catch (Exception e) { + LOG.error(StringUtils.stringifyException(e)); + } + } + + @Override + protected void initialize(JobConf job) throws IOException { Path[] tableNames = FileInputFormat.getInputPaths(job); String colArg = job.get(COLUMN_LIST); String[] colNames = colArg.split(" "); @@ -58,12 +66,8 @@ public class TableInputFormat extends TableInputFormatBase implements m_cols[i] = Bytes.toBytes(colNames[i]); } setInputColumns(m_cols); - try { - Connection connection = ConnectionFactory.createConnection(job); - setHTable((HTable) connection.getTable(TableName.valueOf(tableNames[0].getName()))); - } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); - } + Connection connection = ConnectionFactory.createConnection(job); + initializeTable(connection, TableName.valueOf(tableNames[0].getName())); } public void validateInput(JobConf job) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java index 2a50efc4aac..f8ccea35a09 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase.mapred; +import java.io.Closeable; import java.io.IOException; import org.apache.commons.logging.Log; @@ -25,7 +26,10 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.Filter; @@ -40,28 +44,37 @@ import org.apache.hadoop.mapred.Reporter; * A Base for {@link TableInputFormat}s. Receives a {@link HTable}, a * byte[] of input columns and optionally a {@link Filter}. * Subclasses may use other TableRecordReader implementations. + * + * Subclasses MUST ensure initializeTable(Connection, TableName) is called for an instance to + * function properly. 
Each of the entry points to this class used by the MapReduce framework, + * {@link #getRecordReader(InputSplit, JobConf, Reporter)} and {@link #getSplits(JobConf, int)}, + * will call {@link #initialize(JobConf)} as a convenient centralized location to handle + * retrieving the necessary configuration information. If your subclass overrides either of these + * methods, either call the parent version or call initialize yourself. + * *

      * An example of a subclass: *

      - *   class ExampleTIF extends TableInputFormatBase implements JobConfigurable {
      + *   class ExampleTIF extends TableInputFormatBase {
        *
      - *     public void configure(JobConf job) {
      - *       HTable exampleTable = new HTable(HBaseConfiguration.create(job),
      - *         Bytes.toBytes("exampleTable"));
      - *       // mandatory
      - *       setHTable(exampleTable);
      - *       Text[] inputColumns = new byte [][] { Bytes.toBytes("columnA"),
      + *     {@literal @}Override
      + *     protected void initialize(JobConf context) throws IOException {
      + *       // We are responsible for the lifecycle of this connection until we hand it over in
      + *       // initializeTable.
      + *       Connection connection =
      + *          ConnectionFactory.createConnection(HBaseConfiguration.create(context));
      + *       TableName tableName = TableName.valueOf("exampleTable");
      + *       // mandatory. Once passed here, TableInputFormatBase will handle closing the connection.
      + *       initializeTable(connection, tableName);
      + *       byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"),
        *         Bytes.toBytes("columnB") };
        *       // mandatory
        *       setInputColumns(inputColumns);
      - *       RowFilterInterface exampleFilter = new RegExpRowFilter("keyPrefix.*");
      - *       // optional
      + *       // optional, by default we'll get everything for the given columns.
      + *       Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*"));
        *       setRowFilter(exampleFilter);
        *     }
      - *
      - *     public void validateInput(JobConf job) throws IOException {
      - *     }
      - *  }
      + *   }
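      + *
      + *   // Hypothetical driver sketch (illustrative only, not part of this patch): one way the
      + *   // subclass above might be wired into an old-API job; the job name is a made-up placeholder.
      + *   JobConf job = new JobConf(HBaseConfiguration.create());
      + *   job.setJobName("exampleTIFJob");
      + *   job.setInputFormat(ExampleTIF.class);
      + *   // ...configure mapper and output as usual, then submit with JobClient.runJob(job).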
        * 
      */ @@ -71,10 +84,19 @@ public abstract class TableInputFormatBase implements InputFormat { private static final Log LOG = LogFactory.getLog(TableInputFormatBase.class); private byte [][] inputColumns; - private HTable table; + private Table table; + private RegionLocator regionLocator; + private Connection connection; private TableRecordReader tableRecordReader; private Filter rowFilter; + private static final String NOT_INITIALIZED = "The input format instance has not been properly " + + "initialized. Ensure you call initializeTable either in your constructor or initialize " + + "method"; + private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a" + + " previous error. Please look at the previous logs lines from" + + " the task's full log for more details."; + /** * Builds a TableRecordReader. If no TableRecordReader was provided, uses * the default. @@ -85,19 +107,63 @@ implements InputFormat { public RecordReader getRecordReader( InputSplit split, JobConf job, Reporter reporter) throws IOException { - TableSplit tSplit = (TableSplit) split; - TableRecordReader trr = this.tableRecordReader; - // if no table record reader was provided use default - if (trr == null) { - trr = new TableRecordReader(); + // In case a subclass uses the deprecated approach or calls initializeTable directly + if (table == null) { + initialize(job); } + // null check in case our child overrides getTable to not throw. + try { + if (getTable() == null) { + // initialize() must not have been implemented in the subclass. + throw new IOException(INITIALIZATION_ERROR); + } + } catch (IllegalStateException exception) { + throw new IOException(INITIALIZATION_ERROR, exception); + } + + TableSplit tSplit = (TableSplit) split; + // if no table record reader was provided use default + final TableRecordReader trr = this.tableRecordReader == null ? new TableRecordReader() : + this.tableRecordReader; trr.setStartRow(tSplit.getStartRow()); trr.setEndRow(tSplit.getEndRow()); trr.setHTable(this.table); trr.setInputColumns(this.inputColumns); trr.setRowFilter(this.rowFilter); trr.init(); - return trr; + return new RecordReader() { + + @Override + public void close() throws IOException { + trr.close(); + closeTable(); + } + + @Override + public ImmutableBytesWritable createKey() { + return trr.createKey(); + } + + @Override + public Result createValue() { + return trr.createValue(); + } + + @Override + public long getPos() throws IOException { + return trr.getPos(); + } + + @Override + public float getProgress() throws IOException { + return trr.getProgress(); + } + + @Override + public boolean next(ImmutableBytesWritable key, Result value) throws IOException { + return trr.next(key, value); + } + }; } /** @@ -121,9 +187,19 @@ implements InputFormat { */ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { if (this.table == null) { - throw new IOException("No table was provided"); + initialize(job); } - byte [][] startKeys = this.table.getStartKeys(); + // null check in case our child overrides getTable to not throw. + try { + if (getTable() == null) { + // initialize() must not have been implemented in the subclass. 
+ throw new IOException(INITIALIZATION_ERROR); + } + } catch (IllegalStateException exception) { + throw new IOException(INITIALIZATION_ERROR, exception); + } + + byte [][] startKeys = this.regionLocator.getStartKeys(); if (startKeys == null || startKeys.length == 0) { throw new IOException("Expecting at least one region"); } @@ -138,7 +214,7 @@ implements InputFormat { for (int i = 0; i < realNumSplits; i++) { int lastPos = startPos + middle; lastPos = startKeys.length % realNumSplits > i ? lastPos + 1 : lastPos; - String regionLocation = table.getRegionLocation(startKeys[startPos]). + String regionLocation = regionLocator.getRegionLocation(startKeys[startPos]). getHostname(); splits[i] = new TableSplit(this.table.getName(), startKeys[startPos], ((i + 1) < realNumSplits) ? startKeys[lastPos]: @@ -149,6 +225,23 @@ implements InputFormat { return splits; } + /** + * Allows subclasses to initialize the table information. + * + * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close. + * @param tableName The {@link TableName} of the table to process. + * @throws IOException + */ + protected void initializeTable(Connection connection, TableName tableName) throws IOException { + if (this.table != null || this.connection != null) { + LOG.warn("initializeTable called multiple times. Overwriting connection and table " + + "reference; TableInputFormatBase will not close these old references when done."); + } + this.table = connection.getTable(tableName); + this.regionLocator = connection.getRegionLocator(tableName); + this.connection = connection; + } + /** * @param inputColumns to be passed in {@link Result} to the map task. */ @@ -158,8 +251,20 @@ implements InputFormat { /** * Allows subclasses to get the {@link HTable}. + * @deprecated use {@link #getTable()} */ - protected Table getHTable() { + @Deprecated + protected HTable getHTable() { + return (HTable) getTable(); + } + + /** + * Allows subclasses to get the {@link Table}. + */ + protected Table getTable() { + if (table == null) { + throw new IllegalStateException(NOT_INITIALIZED); + } return this.table; } @@ -167,7 +272,9 @@ implements InputFormat { * Allows subclasses to set the {@link HTable}. * * @param table to get the data from + * @deprecated use {@link #initializeTable(Connection,TableName)} */ + @Deprecated protected void setHTable(HTable table) { this.table = table; } @@ -190,4 +297,40 @@ implements InputFormat { protected void setRowFilter(Filter rowFilter) { this.rowFilter = rowFilter; } + + /** + * Handle subclass specific set up. + * Each of the entry points used by the MapReduce framework, + * {@link #getRecordReader(InputSplit, JobConf, Reporter)} and {@link #getSplits(JobConf, int)}, + * will call {@link #initialize(JobConf)} as a convenient centralized location to handle + * retrieving the necessary configuration information and calling + * {@link #initializeTable(Connection, TableName)}. + * + * Subclasses should implement their initialize call such that it is safe to call multiple times. + * The current TableInputFormatBase implementation relies on a non-null table reference to decide + * if an initialize call is needed, but this behavior may change in the future. In particular, + * it is critical that initializeTable not be called multiple times since this will leak + * Connection instances. 
+ * + */ + protected void initialize(JobConf job) throws IOException { + } + + /** + * Close the Table and related objects that were initialized via + * {@link #initializeTable(Connection, TableName)}. + * + * @throws IOException + */ + protected void closeTable() throws IOException { + close(table, connection); + table = null; + connection = null; + } + + private void close(Closeable... closables) throws IOException { + for (Closeable c : closables) { + if(c != null) { c.close(); } + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index f69f21f4b45..26ae0970e2c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -51,7 +51,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter; +import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; @@ -129,7 +129,7 @@ public class HFileOutputFormat2 // Invented config. Add to hbase-*.xml if other than default compression. final String defaultCompressionStr = conf.get("hfile.compression", Compression.Algorithm.NONE.getName()); - final Algorithm defaultCompression = AbstractHFileWriter + final Algorithm defaultCompression = HFileWriterImpl .compressionByName(defaultCompressionStr); final boolean compactionExclude = conf.getBoolean( "hbase.mapreduce.hfileoutputformat.compaction.exclude", false); @@ -483,7 +483,7 @@ public class HFileOutputFormat2 Map compressionMap = new TreeMap(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { - Algorithm algorithm = AbstractHFileWriter.compressionByName(e.getValue()); + Algorithm algorithm = HFileWriterImpl.compressionByName(e.getValue()); compressionMap.put(e.getKey(), algorithm); } return compressionMap; @@ -584,17 +584,17 @@ public class HFileOutputFormat2 */ static void configurePartitioner(Job job, List splitPoints) throws IOException { - + Configuration conf = job.getConfiguration(); // create the partitions file - FileSystem fs = FileSystem.get(job.getConfiguration()); - Path partitionsPath = new Path("/tmp", "partitions_" + UUID.randomUUID()); + FileSystem fs = FileSystem.get(conf); + Path partitionsPath = new Path(conf.get("hadoop.tmp.dir"), "partitions_" + UUID.randomUUID()); fs.makeQualified(partitionsPath); - writePartitions(job.getConfiguration(), partitionsPath, splitPoints); + writePartitions(conf, partitionsPath, splitPoints); fs.deleteOnExit(partitionsPath); // configure job to use it job.setPartitionerClass(TotalOrderPartitioner.class); - TotalOrderPartitioner.setPartitionFile(job.getConfiguration(), partitionsPath); + TotalOrderPartitioner.setPartitionFile(conf, partitionsPath); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java index 45f57b3b4fb..0624f10bf7e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -32,7 +32,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -89,6 +88,7 @@ import java.util.Collection; import java.util.Deque; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -159,6 +159,75 @@ public class LoadIncrementalHFiles extends Configured implements Tool { + "\n"); } + private static interface BulkHFileVisitor { + TFamily bulkFamily(final byte[] familyName) + throws IOException; + void bulkHFile(final TFamily family, final FileStatus hfileStatus) + throws IOException; + } + + /** + * Iterate over the bulkDir hfiles. + * Skip reference, HFileLink, files starting with "_" and non-valid hfiles. + */ + private static void visitBulkHFiles(final FileSystem fs, final Path bulkDir, + final BulkHFileVisitor visitor) throws IOException { + if (!fs.exists(bulkDir)) { + throw new FileNotFoundException("Bulkload dir " + bulkDir + " not found"); + } + + FileStatus[] familyDirStatuses = fs.listStatus(bulkDir); + if (familyDirStatuses == null) { + throw new FileNotFoundException("No families found in " + bulkDir); + } + + for (FileStatus familyStat : familyDirStatuses) { + if (!familyStat.isDirectory()) { + LOG.warn("Skipping non-directory " + familyStat.getPath()); + continue; + } + Path familyDir = familyStat.getPath(); + byte[] familyName = familyDir.getName().getBytes(); + TFamily family = visitor.bulkFamily(familyName); + + FileStatus[] hfileStatuses = fs.listStatus(familyDir); + for (FileStatus hfileStatus : hfileStatuses) { + if (!fs.isFile(hfileStatus.getPath())) { + LOG.warn("Skipping non-file " + hfileStatus); + continue; + } + + Path hfile = hfileStatus.getPath(); + // Skip "_", reference, HFileLink + String fileName = hfile.getName(); + if (fileName.startsWith("_")) { + continue; + } + if (StoreFileInfo.isReference(fileName)) { + LOG.warn("Skipping reference " + fileName); + continue; + } + if (HFileLink.isHFileLink(fileName)) { + LOG.warn("Skipping HFileLink " + fileName); + continue; + } + + // Validate HFile Format + try { + if (!HFile.isHFileFormat(fs, hfile)) { + LOG.warn("the file " + hfile + " doesn't seems to be an hfile. skipping"); + continue; + } + } catch (FileNotFoundException e) { + LOG.warn("the file " + hfile + " was removed"); + continue; + } + + visitor.bulkHFile(family, hfileStatus); + } + } + } + /** * Represents an HFile waiting to be loaded. An queue is used * in this class in order to support the case where a region has @@ -176,6 +245,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { this.hfilePath = hfilePath; } + @Override public String toString() { return "family:"+ Bytes.toString(family) + " path:" + hfilePath.toString(); } @@ -185,50 +255,25 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * Walk the given directory for all HFiles, and return a Queue * containing all such files. 
*/ - private void discoverLoadQueue(Deque ret, Path hfofDir) + private void discoverLoadQueue(final Deque ret, final Path hfofDir) throws IOException { fs = hfofDir.getFileSystem(getConf()); - - if (!fs.exists(hfofDir)) { - throw new FileNotFoundException("HFileOutputFormat dir " + - hfofDir + " not found"); - } - - FileStatus[] familyDirStatuses = fs.listStatus(hfofDir); - if (familyDirStatuses == null) { - throw new FileNotFoundException("No families found in " + hfofDir); - } - - for (FileStatus stat : familyDirStatuses) { - if (!stat.isDirectory()) { - LOG.warn("Skipping non-directory " + stat.getPath()); - continue; + visitBulkHFiles(fs, hfofDir, new BulkHFileVisitor() { + @Override + public byte[] bulkFamily(final byte[] familyName) { + return familyName; } - Path familyDir = stat.getPath(); - byte[] family = familyDir.getName().getBytes(); - FileStatus[] hfileStatuses = fs.listStatus(familyDir); - for (FileStatus hfileStatus : hfileStatuses) { - long length = hfileStatus.getLen(); - Path hfile = hfileStatus.getPath(); - // Skip "_", reference, HFileLink - String fileName = hfile.getName(); - if (fileName.startsWith("_")) continue; - if (StoreFileInfo.isReference(fileName)) { - LOG.warn("Skipping reference " + fileName); - continue; - } - if (HFileLink.isHFileLink(fileName)) { - LOG.warn("Skipping HFileLink " + fileName); - continue; - } - if(length > getConf().getLong(HConstants.HREGION_MAX_FILESIZE, + @Override + public void bulkHFile(final byte[] family, final FileStatus hfile) throws IOException { + long length = hfile.getLen(); + if (length > getConf().getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE)) { - LOG.warn("Trying to bulk load hfile " + hfofDir.toString() + " with size: " + + LOG.warn("Trying to bulk load hfile " + hfile.getPath() + " with size: " + length + " bytes can be problematic as it may lead to oversplitting."); } - ret.add(new LoadQueueItem(family, hfile)); + ret.add(new LoadQueueItem(family, hfile.getPath())); } - } + }); } /** @@ -242,11 +287,10 @@ public class LoadIncrementalHFiles extends Configured implements Tool { */ @SuppressWarnings("deprecation") public void doBulkLoad(Path hfofDir, final HTable table) - throws TableNotFoundException, IOException - { + throws TableNotFoundException, IOException { doBulkLoad(hfofDir, table.getConnection().getAdmin(), table, table.getRegionLocator()); } - + /** * Perform a bulk load of the given directory into the given * pre-existing table. This method is not threadsafe. 
@@ -282,12 +326,14 @@ public class LoadIncrementalHFiles extends Configured implements Tool { discoverLoadQueue(queue, hfofDir); // check whether there is invalid family name in HFiles to be bulkloaded Collection families = table.getTableDescriptor().getFamilies(); - ArrayList familyNames = new ArrayList(); + ArrayList familyNames = new ArrayList(families.size()); for (HColumnDescriptor family : families) { familyNames.add(family.getNameAsString()); } ArrayList unmatchedFamilies = new ArrayList(); - for (LoadQueueItem lqi : queue) { + Iterator queueIter = queue.iterator(); + while (queueIter.hasNext()) { + LoadQueueItem lqi = queueIter.next(); String familyNameInHFile = Bytes.toString(lqi.family); if (!familyNames.contains(familyNameInHFile)) { unmatchedFamilies.add(familyNameInHFile); @@ -390,6 +436,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { final Collection lqis = e.getValue(); final Callable> call = new Callable>() { + @Override public List call() throws Exception { List toRetry = tryAtomicRegionLoad(conn, table.getName(), first, lqis); @@ -466,6 +513,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { final LoadQueueItem item = queue.remove(); final Callable> call = new Callable>() { + @Override public List call() throws Exception { List splits = groupOrSplit(regionGroups, item, table, startEndKeys); return splits; @@ -729,7 +777,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { throw e; } } - + private boolean isSecureBulkLoadEndpointAvailable() { String classes = getConf().get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, ""); return classes.contains("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint"); @@ -844,46 +892,26 @@ public class LoadIncrementalHFiles extends Configured implements Tool { * More modifications necessary if we want to avoid doing it. 
*/ private void createTable(TableName tableName, String dirPath) throws Exception { - Path hfofDir = new Path(dirPath); - FileSystem fs = hfofDir.getFileSystem(getConf()); - - if (!fs.exists(hfofDir)) { - throw new FileNotFoundException("HFileOutputFormat dir " + - hfofDir + " not found"); - } - - FileStatus[] familyDirStatuses = fs.listStatus(hfofDir); - if (familyDirStatuses == null) { - throw new FileNotFoundException("No families found in " + hfofDir); - } - - HTableDescriptor htd = new HTableDescriptor(tableName); - HColumnDescriptor hcd; + final Path hfofDir = new Path(dirPath); + final FileSystem fs = hfofDir.getFileSystem(getConf()); // Add column families // Build a set of keys - byte[][] keys; - TreeMap map = new TreeMap(Bytes.BYTES_COMPARATOR); - - for (FileStatus stat : familyDirStatuses) { - if (!stat.isDirectory()) { - LOG.warn("Skipping non-directory " + stat.getPath()); - continue; + final HTableDescriptor htd = new HTableDescriptor(tableName); + final TreeMap map = new TreeMap(Bytes.BYTES_COMPARATOR); + visitBulkHFiles(fs, hfofDir, new BulkHFileVisitor() { + @Override + public HColumnDescriptor bulkFamily(final byte[] familyName) { + HColumnDescriptor hcd = new HColumnDescriptor(familyName); + htd.addFamily(hcd); + return hcd; } - Path familyDir = stat.getPath(); - byte[] family = familyDir.getName().getBytes(); - - hcd = new HColumnDescriptor(family); - htd.addFamily(hcd); - - Path[] hfiles = FileUtil.stat2Paths(fs.listStatus(familyDir)); - for (Path hfile : hfiles) { - String fileName = hfile.getName(); - if (fileName.startsWith("_") || StoreFileInfo.isReference(fileName) - || HFileLink.isHFileLink(fileName)) continue; + @Override + public void bulkHFile(final HColumnDescriptor hcd, final FileStatus hfileStatus) + throws IOException { + Path hfile = hfileStatus.getPath(); HFile.Reader reader = HFile.createReader(fs, hfile, new CacheConfig(getConf()), getConf()); - final byte[] first, last; try { if (hcd.getCompressionType() != reader.getFileContext().getCompression()) { hcd.setCompressionType(reader.getFileContext().getCompression()); @@ -891,8 +919,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool { " for family " + hcd.toString()); } reader.loadFileInfo(); - first = reader.getFirstRowKey(); - last = reader.getLastRowKey(); + byte[] first = reader.getFirstRowKey(); + byte[] last = reader.getLastRowKey(); LOG.info("Trying to figure out region boundaries hfile=" + hfile + " first=" + Bytes.toStringBinary(first) + @@ -904,13 +932,13 @@ public class LoadIncrementalHFiles extends Configured implements Tool { value = map.containsKey(last)? 
map.get(last):0; map.put(last, value-1); - } finally { + } finally { reader.close(); } } - } + }); - keys = LoadIncrementalHFiles.inferBoundaries(map); + byte[][] keys = LoadIncrementalHFiles.inferBoundaries(map); this.hbAdmin.createTable(htd,keys); LOG.info("Table "+ tableName +" is available!!"); @@ -945,7 +973,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { HTable table = (HTable) connection.getTable(tableName);) { doBulkLoad(hfofDir, table); } - + return 0; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java index 8896eb08389..bc2537b7517 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java @@ -175,7 +175,9 @@ implements Configurable { } @Override - protected void initialize() { + protected void initialize(JobContext context) throws IOException { + // Do we have to worry about mis-matches between the Configuration from setConf and the one + // in this context? TableName tableName = TableName.valueOf(conf.get(INPUT_TABLE)); try { initializeTable(ConnectionFactory.createConnection(new Configuration(conf)), tableName); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java index 6ab7ba8d2cc..def460f3524 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java @@ -62,35 +62,40 @@ import org.apache.hadoop.util.StringUtils; * A base for {@link TableInputFormat}s. Receives a {@link Connection}, a {@link TableName}, * an {@link Scan} instance that defines the input columns etc. Subclasses may use * other TableRecordReader implementations. + * + * Subclasses MUST ensure initializeTable(Connection, TableName) is called for an instance to + * function properly. Each of the entry points to this class used by the MapReduce framework, + * {@link #createRecordReader(InputSplit, TaskAttemptContext)} and {@link #getSplits(JobContext)}, + * will call {@link #initialize(JobContext)} as a convenient centralized location to handle + * retrieving the necessary configuration information. If your subclass overrides either of these + * methods, either call the parent version or call initialize yourself. + * *

      * An example of a subclass: *

      - *   class ExampleTIF extends TableInputFormatBase implements JobConfigurable {
      + *   class ExampleTIF extends TableInputFormatBase {
        *
      - *     private JobConf job;
      - *
      - *     public void configure(JobConf job) {
      - *       this.job = job;
      - *       Text[] inputColumns = new byte [][] { Bytes.toBytes("cf1:columnA"),
      - *         Bytes.toBytes("cf2") };
      - *       // mandatory
      - *       setInputColumns(inputColumns);
      - *       RowFilterInterface exampleFilter = new RegExpRowFilter("keyPrefix.*");
      - *       // optional
      - *       setRowFilter(exampleFilter);
      - *     }
      - *     
      - *     protected void initialize() {
      - *       Connection connection =
      - *          ConnectionFactory.createConnection(HBaseConfiguration.create(job));
      + *     {@literal @}Override
      + *     protected void initialize(JobContext context) throws IOException {
      + *       // We are responsible for the lifecycle of this connection until we hand it over in
      + *       // initializeTable.
      + *       Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(
      + *              context.getConfiguration()));
        *       TableName tableName = TableName.valueOf("exampleTable");
      - *       // mandatory
      + *       // mandatory. Once passed here, TableInputFormatBase will handle closing the connection.
        *       initializeTable(connection, tableName);
      - *    }
      - *
      - *     public void validateInput(JobConf job) throws IOException {
      + *       byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"),
      + *         Bytes.toBytes("columnB") };
      + *       // optional, by default we'll get everything for the table.
      + *       Scan scan = new Scan();
      + *       for (byte[] family : inputColumns) {
      + *         scan.addFamily(family);
      + *       }
      + *       Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*"));
      + *       scan.setFilter(exampleFilter);
      + *       setScan(scan);
        *     }
      - *  }
      + *   }
      * </pre>
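A minimal driver sketch (not part of this patch) of how a subclass like ExampleTIF above could be used as a job's input format; the MyMapper class and job name are invented for illustration only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

    public class ExampleTIFDriver {
      // Hypothetical mapper: just counts the rows handed out by ExampleTIF.
      static class MyMapper extends TableMapper<NullWritable, NullWritable> {
        @Override
        protected void map(ImmutableBytesWritable row, Result value, Context context) {
          context.getCounter("example", "rows").increment(1);
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "example-tif-scan");
        job.setJarByClass(ExampleTIFDriver.class);
        // getSplits/createRecordReader will call ExampleTIF.initialize(JobContext) as described above.
        job.setInputFormatClass(ExampleTIF.class);
        job.setMapperClass(MyMapper.class);
        job.setNumReduceTasks(0);
        job.setOutputFormatClass(NullOutputFormat.class);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }

In practice TableMapReduceUtil.addDependencyJars(job) would normally also be called so the HBase jars ship with the job; ExampleTIF is assumed to live in the same package as this driver.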
      */ @InterfaceAudience.Public @@ -110,6 +115,13 @@ extends InputFormat { final Log LOG = LogFactory.getLog(TableInputFormatBase.class); + private static final String NOT_INITIALIZED = "The input format instance has not been properly " + + "initialized. Ensure you call initializeTable either in your constructor or initialize " + + "method"; + private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a" + + " previous error. Please look at the previous logs lines from" + + " the task's full log for more details."; + /** Holds the details for the internal scanner. * * @see Scan */ @@ -146,14 +158,18 @@ extends InputFormat { public RecordReader createRecordReader( InputSplit split, TaskAttemptContext context) throws IOException { + // Just in case a subclass is relying on JobConfigurable magic. if (table == null) { - initialize(); + initialize(context); } - if (getTable() == null) { - // initialize() must not have been implemented in the subclass. - throw new IOException("Cannot create a record reader because of a" + - " previous error. Please look at the previous logs lines from" + - " the task's full log for more details."); + // null check in case our child overrides getTable to not throw. + try { + if (getTable() == null) { + // initialize() must not have been implemented in the subclass. + throw new IOException(INITIALIZATION_ERROR); + } + } catch (IllegalStateException exception) { + throw new IOException(INITIALIZATION_ERROR, exception); } TableSplit tSplit = (TableSplit) split; LOG.info("Input split length: " + StringUtils.humanReadableInt(tSplit.getLength()) + " bytes."); @@ -218,14 +234,20 @@ extends InputFormat { public List getSplits(JobContext context) throws IOException { boolean closeOnFinish = false; + // Just in case a subclass is relying on JobConfigurable magic. if (table == null) { - initialize(); + initialize(context); closeOnFinish = true; } - if (getTable() == null) { - // initialize() wasn't implemented, so the table is null. - throw new IOException("No table was provided."); + // null check in case our child overrides getTable to not throw. + try { + if (getTable() == null) { + // initialize() must not have been implemented in the subclass. + throw new IOException(INITIALIZATION_ERROR); + } + } catch (IllegalStateException exception) { + throw new IOException(INITIALIZATION_ERROR, exception); } try { @@ -322,6 +344,10 @@ extends InputFormat { } } + /** + * @deprecated mistakenly made public in 0.98.7. scope will change to package-private + */ + @Deprecated public String reverseDNS(InetAddress ipAddress) throws NamingException, UnknownHostException { String hostName = this.reverseDNSCacheMap.get(ipAddress); if (hostName == null) { @@ -354,7 +380,7 @@ extends InputFormat { * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( * org.apache.hadoop.mapreduce.JobContext) */ - public List calculateRebalancedSplits(List list, JobContext context, + private List calculateRebalancedSplits(List list, JobContext context, long average) throws IOException { List resultList = new ArrayList(); Configuration conf = context.getConfiguration(); @@ -428,6 +454,7 @@ extends InputFormat { * @param isText It determines to use text key mode or binary key mode * @return The split point in the region. 
*/ + @InterfaceAudience.Private public static byte[] getSplitKey(byte[] start, byte[] end, boolean isText) { byte upperLimitByte; byte lowerLimitByte; @@ -507,8 +534,6 @@ extends InputFormat { } /** - * - * * Test if the given region is to be included in the InputSplit while splitting * the regions of a table. *

      @@ -535,7 +560,7 @@ extends InputFormat { /** * Allows subclasses to get the {@link HTable}. * - * @deprecated + * @deprecated use {@link #getTable()} */ @Deprecated protected HTable getHTable() { @@ -547,7 +572,7 @@ extends InputFormat { */ protected RegionLocator getRegionLocator() { if (regionLocator == null) { - initialize(); + throw new IllegalStateException(NOT_INITIALIZED); } return regionLocator; } @@ -557,7 +582,7 @@ extends InputFormat { */ protected Table getTable() { if (table == null) { - initialize(); + throw new IllegalStateException(NOT_INITIALIZED); } return table; } @@ -567,7 +592,7 @@ extends InputFormat { */ protected Admin getAdmin() { if (admin == null) { - initialize(); + throw new IllegalStateException(NOT_INITIALIZED); } return admin; } @@ -575,6 +600,9 @@ extends InputFormat { /** * Allows subclasses to set the {@link HTable}. * + * Will attempt to reuse the underlying Connection for our own needs, including + * retreiving an Admin interface to the HBase cluster. + * * @param table The table to get the data from. * @throws IOException * @deprecated Use {@link #initializeTable(Connection, TableName)} instead. @@ -582,19 +610,23 @@ extends InputFormat { @Deprecated protected void setHTable(HTable table) throws IOException { this.table = table; - this.regionLocator = table.getRegionLocator(); this.connection = table.getConnection(); + this.regionLocator = table.getRegionLocator(); this.admin = this.connection.getAdmin(); } /** * Allows subclasses to initialize the table information. * - * @param connection The {@link Connection} to the HBase cluster. + * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close. * @param tableName The {@link TableName} of the table to process. * @throws IOException */ protected void initializeTable(Connection connection, TableName tableName) throws IOException { + if (this.table != null || this.connection != null) { + LOG.warn("initializeTable called multiple times. Overwriting connection and table " + + "reference; TableInputFormatBase will not close these old references when done."); + } this.table = connection.getTable(tableName); this.regionLocator = connection.getRegionLocator(tableName); this.admin = connection.getAdmin(); @@ -631,12 +663,21 @@ extends InputFormat { } /** - * This method will be called when any of the following are referenced, but not yet initialized: - * admin, regionLocator, table. Subclasses will have the opportunity to call - * {@link #initializeTable(Connection, TableName)} + * Handle subclass specific set up. + * Each of the entry points used by the MapReduce framework, + * {@link #createRecordReader(InputSplit, TaskAttemptContext)} and {@link #getSplits(JobContext)}, + * will call {@link #initialize(JobContext)} as a convenient centralized location to handle + * retrieving the necessary configuration information and calling + * {@link #initializeTable(Connection, TableName)}. + * + * Subclasses should implement their initialize call such that it is safe to call multiple times. + * The current TableInputFormatBase implementation relies on a non-null table reference to decide + * if an initialize call is needed, but this behavior may change in the future. In particular, + * it is critical that initializeTable not be called multiple times since this will leak + * Connection instances. 
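A minimal sketch, assuming a subclass such as ExampleTIF and a hypothetical table name, of an initialize implementation that is safe to call multiple times; it leans on the fact that getTable() now throws IllegalStateException until initializeTable has run. This is illustrative only and not code from this patch.

    // Inside a subclass such as ExampleTIF; imports as in the javadoc example above, plus
    // org.apache.hadoop.hbase.client.Connection and ConnectionFactory.
    @Override
    protected void initialize(JobContext context) throws IOException {
      // If initializeTable already ran, bail out so we do not leak a second Connection.
      try {
        if (getTable() != null) {
          return;
        }
      } catch (IllegalStateException notYetInitialized) {
        // Expected on the first call; fall through and set up the table.
      }
      Connection connection = ConnectionFactory.createConnection(
          HBaseConfiguration.create(context.getConfiguration()));
      // Ownership of the connection passes to TableInputFormatBase from here on.
      initializeTable(connection, TableName.valueOf("exampleTable"));
    }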
+ * */ - protected void initialize() { - + protected void initialize(JobContext context) throws IOException { } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java index 47f686924ca..06fa7125fa3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.client.ScannerCallable; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.InputSplit; @@ -80,8 +79,7 @@ public class TableRecordReaderImpl { public void restart(byte[] firstRow) throws IOException { currentScan = new Scan(scan); currentScan.setStartRow(firstRow); - currentScan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, - Bytes.toBytes(Boolean.TRUE)); + currentScan.setScanMetricsEnabled(true); if (this.scanner != null) { if (logScannerActivity) { LOG.info("Closing the previously opened scanner object."); @@ -265,14 +263,11 @@ public class TableRecordReaderImpl { * @throws IOException */ private void updateCounters() throws IOException { - byte[] serializedMetrics = currentScan.getAttribute( - Scan.SCAN_ATTRIBUTES_METRICS_DATA); - if (serializedMetrics == null || serializedMetrics.length == 0 ) { + ScanMetrics scanMetrics = this.scan.getScanMetrics(); + if (scanMetrics == null) { return; } - ScanMetrics scanMetrics = ProtobufUtil.toScanMetrics(serializedMetrics); - updateCounters(scanMetrics, numRestarts, getCounter, context, numStale); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index b6d43de74d5..28f9f39f779 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -30,9 +30,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HConnectable; -import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -129,22 +126,17 @@ public class VerifyReplication extends Configured implements Tool { } final TableSplit tableSplit = (TableSplit)(context.getInputSplit()); - HConnectionManager.execute(new HConnectable(conf) { - @Override - public Void connect(HConnection conn) throws IOException { - String zkClusterKey = conf.get(NAME + ".peerQuorumAddress"); - Configuration peerConf = HBaseConfiguration.create(conf); - ZKUtil.applyClusterKeyToConf(peerConf, zkClusterKey); - TableName tableName = TableName.valueOf(conf.get(NAME + ".tableName")); - connection = ConnectionFactory.createConnection(peerConf); - replicatedTable = connection.getTable(tableName); - 
scan.setStartRow(value.getRow()); - scan.setStopRow(tableSplit.getEndRow()); - replicatedScanner = replicatedTable.getScanner(scan); - return null; - } - }); + String zkClusterKey = conf.get(NAME + ".peerQuorumAddress"); + Configuration peerConf = HBaseConfiguration.create(conf); + ZKUtil.applyClusterKeyToConf(peerConf, zkClusterKey); + + TableName tableName = TableName.valueOf(conf.get(NAME + ".tableName")); + connection = ConnectionFactory.createConnection(peerConf); + replicatedTable = connection.getTable(tableName); + scan.setStartRow(value.getRow()); + scan.setStopRow(tableSplit.getEndRow()); + replicatedScanner = replicatedTable.getScanner(scan); currentCompareRowInPeerTable = replicatedScanner.next(); } while (true) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index f861529a8fe..4a1e71fcb3b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -1008,18 +1008,6 @@ public class AssignmentManager { regionStates.updateRegionState(region, State.FAILED_OPEN); return; } - // In case of assignment from EnableTableHandler table state is ENABLING. Any how - // EnableTableHandler will set ENABLED after assigning all the table regions. If we - // try to set to ENABLED directly then client API may think table is enabled. - // When we have a case such as all the regions are added directly into hbase:meta and we call - // assignRegion then we need to make the table ENABLED. Hence in such case the table - // will not be in ENABLING or ENABLED state. - TableName tableName = region.getTable(); - if (!tableStateManager.isTableState(tableName, - TableState.State.ENABLED, TableState.State.ENABLING)) { - LOG.debug("Setting table " + tableName + " to ENABLED state."); - setEnabledTable(tableName); - } LOG.info("Assigning " + region.getRegionNameAsString() + " to " + plan.getDestination().toString()); // Transition RegionState to PENDING_OPEN @@ -2058,6 +2046,7 @@ public class AssignmentManager { * @param plan Plan to execute. 
*/ public void balance(final RegionPlan plan) { + HRegionInfo hri = plan.getRegionInfo(); TableName tableName = hri.getTable(); if (tableStateManager.isTableState(tableName, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java index 9d18c986c28..e9fca27595f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java @@ -42,8 +42,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.HFileArchiver; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.MetaScanner; -import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.util.Bytes; @@ -142,9 +140,9 @@ public class CatalogJanitor extends ScheduledChore { final Map mergedRegions = new TreeMap(); // This visitor collects split parents and counts rows in the hbase:meta table - MetaScannerVisitor visitor = new MetaScanner.MetaScannerVisitorBase() { + MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { @Override - public boolean processRow(Result r) throws IOException { + public boolean visit(Result r) throws IOException { if (r == null || r.isEmpty()) return true; count.incrementAndGet(); HRegionInfo info = HRegionInfo.getHRegionInfo(r); @@ -165,7 +163,7 @@ public class CatalogJanitor extends ScheduledChore { // Run full scan of hbase:meta catalog table passing in our custom visitor with // the start row - MetaScanner.metaScan(this.connection, visitor, tableName); + MetaTableAccessor.scanMetaForTableRegions(this.connection, visitor, tableName); return new Triple, Map>( count.get(), mergedRegions, splitParents); @@ -369,14 +367,27 @@ public class CatalogJanitor extends ScheduledChore { Path rootdir = this.services.getMasterFileSystem().getRootDir(); Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTable()); + Path daughterRegionDir = new Path(tabledir, daughter.getEncodedName()); + HRegionFileSystem regionFs = null; + + try { + if (!FSUtils.isExists(fs, daughterRegionDir)) { + return new Pair(Boolean.FALSE, Boolean.FALSE); + } + } catch (IOException ioe) { + LOG.warn("Error trying to determine if daughter region exists, " + + "assuming exists and has references", ioe); + return new Pair(Boolean.TRUE, Boolean.TRUE); + } + try { regionFs = HRegionFileSystem.openRegionFromFileSystem( this.services.getConfiguration(), fs, tabledir, daughter, true); } catch (IOException e) { - LOG.warn("Daughter region does not exist: " + daughter.getEncodedName() - + ", parent is: " + parent.getEncodedName()); - return new Pair(Boolean.FALSE, Boolean.FALSE); + LOG.warn("Error trying to determine referenced files from : " + daughter.getEncodedName() + + ", to: " + parent.getEncodedName() + " assuming has references", e); + return new Pair(Boolean.TRUE, Boolean.TRUE); } boolean references = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index fe6f06e19b1..5a1e188fde3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -58,14 +58,12 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.NamespaceNotFoundException; import org.apache.hadoop.hbase.PleaseHoldException; -import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; @@ -75,16 +73,12 @@ import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.MetaScanner; -import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; -import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.executor.ExecutorType; -import org.apache.hadoop.hbase.ipc.RequestContext; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.master.MasterRpcServices.BalanceSwitchMode; @@ -94,22 +88,28 @@ import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore; import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; import org.apache.hadoop.hbase.master.cleaner.LogCleaner; -import org.apache.hadoop.hbase.master.handler.CreateTableHandler; -import org.apache.hadoop.hbase.master.handler.DeleteTableHandler; -import org.apache.hadoop.hbase.master.handler.DisableTableHandler; import org.apache.hadoop.hbase.master.handler.DispatchMergingRegionHandler; -import org.apache.hadoop.hbase.master.handler.EnableTableHandler; -import org.apache.hadoop.hbase.master.handler.ModifyTableHandler; -import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler; -import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler; -import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler; -import org.apache.hadoop.hbase.master.handler.TruncateTableHandler; +import org.apache.hadoop.hbase.master.procedure.AddColumnFamilyProcedure; +import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure; +import org.apache.hadoop.hbase.master.procedure.DeleteColumnFamilyProcedure; +import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure; +import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure; +import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure; +import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure; 
+import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch; +import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; +import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost; import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo; @@ -127,10 +127,10 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CompressionTest; import org.apache.hadoop.hbase.util.EncryptionTest; import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.util.HBaseFsckRepair; import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.util.HasThread; import org.apache.hadoop.hbase.util.IdLock; +import org.apache.hadoop.hbase.util.ModifyRegionUtils; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.VersionInfo; @@ -308,7 +308,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server { // monitor for distributed procedures MasterProcedureManagerHost mpmHost; - private MasterQuotaManager quotaManager; + // it is assigned after 'initialized' guard set to true, so should be volatile + private volatile MasterQuotaManager quotaManager; + + private ProcedureExecutor procedureExecutor; + private WALProcedureStore procedureStore; // handle table states private TableStateManager tableStateManager; @@ -425,6 +429,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } RedirectServlet.regionServerInfoPort = infoServer.getPort(); + if(RedirectServlet.regionServerInfoPort == infoPort) { + return infoPort; + } masterJettyServer = new org.mortbay.jetty.Server(); Connector connector = new SelectChannelConnector(); connector.setHost(addr); @@ -577,13 +584,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server { this.mpmHost.loadProcedures(conf); this.mpmHost.initialize(this, this.metricsMaster); - // migrating existent table state from zk - for (Map.Entry entry : ZKDataMigrator - .queryForTableStates(getZooKeeper()).entrySet()) { - LOG.info("Converting state from zk to new states:" + entry); - tableStateManager.setTableState(entry.getKey(), entry.getValue()); - } - ZKUtil.deleteChildrenRecursively(getZooKeeper(), getZooKeeper().tableZNode); } /** @@ -722,6 +722,15 @@ public class HMaster extends HRegionServer implements MasterServices, Server { // assigned when master is shutting down if(isStopped()) return; + // migrating existent table state from zk, so splitters + // and recovery process treat states properly. 
+ for (Map.Entry entry : ZKDataMigrator + .queryForTableStates(getZooKeeper()).entrySet()) { + LOG.info("Converting state from zk to new states:" + entry); + tableStateManager.setTableState(entry.getKey(), entry.getValue()); + } + ZKUtil.deleteChildrenRecursively(getZooKeeper(), getZooKeeper().tableZNode); + status.setStatus("Submitting log splitting work for previously failed region servers"); // Master has recovered hbase:meta region server and we put // other failed region servers in a queue to be handled later by SSH @@ -922,9 +931,10 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } void initQuotaManager() throws IOException { - quotaManager = new MasterQuotaManager(this); + MasterQuotaManager quotaManager = new MasterQuotaManager(this); this.assignmentManager.setRegionStateListener((RegionStateListener)quotaManager); quotaManager.start(); + this.quotaManager = quotaManager; } boolean isCatalogJanitorEnabled() { @@ -1016,7 +1026,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { this.service.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS, conf.getInt("hbase.master.executor.serverops.threads", 5)); this.service.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS, - conf.getInt("hbase.master.executor.serverops.threads", 5)); + conf.getInt("hbase.master.executor.meta.serverops.threads", 5)); this.service.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS, conf.getInt("hbase.master.executor.logreplayops.threads", 10)); @@ -1026,6 +1036,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { // Any time changing this maxThreads to > 1, pls see the comment at // AccessController#postCreateTableHandler this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1); + startProcedureExecutor(); // Start log cleaner thread int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000); @@ -1046,6 +1057,12 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } } + @Override + protected void sendShutdownInterrupt() { + super.sendShutdownInterrupt(); + stopProcedureExecutor(); + } + @Override protected void stopServiceThreads() { if (masterJettyServer != null) { @@ -1058,6 +1075,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } super.stopServiceThreads(); stopChores(); + // Wait for all the remaining region servers to report in IFF we were // running a cluster shutdown AND we were NOT aborting. 
if (!isAborted() && this.serverManager != null && @@ -1078,6 +1096,34 @@ public class HMaster extends HRegionServer implements MasterServices, Server { if (this.mpmHost != null) this.mpmHost.stop("server shutting down."); } + private void startProcedureExecutor() throws IOException { + final MasterProcedureEnv procEnv = new MasterProcedureEnv(this); + final Path logDir = new Path(fileSystemManager.getRootDir(), + MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR); + + procedureStore = new WALProcedureStore(conf, fileSystemManager.getFileSystem(), logDir, + new MasterProcedureEnv.WALStoreLeaseRecovery(this)); + procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this)); + procedureExecutor = new ProcedureExecutor(conf, procEnv, procedureStore, + procEnv.getProcedureQueue()); + + final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, + Math.max(Runtime.getRuntime().availableProcessors(), + MasterProcedureConstants.DEFAULT_MIN_MASTER_PROCEDURE_THREADS)); + procedureStore.start(numThreads); + procedureExecutor.start(numThreads); + } + + private void stopProcedureExecutor() { + if (procedureExecutor != null) { + procedureExecutor.stop(); + } + + if (procedureStore != null) { + procedureStore.stop(isAborted()); + } + } + private void stopChores() { if (this.expiredMobFileCleanerChore != null) { this.expiredMobFileCleanerChore.cancel(true); @@ -1127,15 +1173,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server { * @return Maximum time we should run balancer for */ private int getBalancerCutoffTime() { - int balancerCutoffTime = - getConfiguration().getInt("hbase.balancer.max.balancing", -1); + int balancerCutoffTime = getConfiguration().getInt("hbase.balancer.max.balancing", -1); if (balancerCutoffTime == -1) { - // No time period set so create one - int balancerPeriod = - getConfiguration().getInt("hbase.balancer.period", 300000); + // if cutoff time isn't set, defaulting it to period time + int balancerPeriod = getConfiguration().getInt("hbase.balancer.period", 300000); balancerCutoffTime = balancerPeriod; - // If nonsense period, set it to balancerPeriod - if (balancerCutoffTime <= 0) balancerCutoffTime = balancerPeriod; } return balancerCutoffTime; } @@ -1228,8 +1270,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { * @return Client info for use as prefix on an audit log string; who did an action */ String getClientIdAuditPrefix() { - return "Client=" + RequestContext.getRequestUserName() + "/" + - RequestContext.get().getRemoteAddress(); + return "Client=" + RpcServer.getRequestUserName() + "/" + RpcServer.getRemoteAddress(); } /** @@ -1300,6 +1341,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server { return; } } + // warmup the region on the destination before initiating the move. this call + // is synchronous and takes some time. 
doing it before the source region gets + // closed + serverManager.sendRegionWarmup(rp.getDestination(), hri); + LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer"); this.assignmentManager.balance(rp); if (this.cpHost != null) { @@ -1314,7 +1360,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } @Override - public void createTable(HTableDescriptor hTableDescriptor, + public long createTable(HTableDescriptor hTableDescriptor, byte [][] splitKeys) throws IOException { if (isStopped()) { throw new MasterNotRunningException(); @@ -1323,7 +1369,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { String namespace = hTableDescriptor.getTableName().getNamespaceAsString(); ensureNamespaceExists(namespace); - HRegionInfo[] newRegions = getHRegionInfos(hTableDescriptor, splitKeys); + HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(hTableDescriptor, splitKeys); checkInitialized(); sanityCheckTableDescriptor(hTableDescriptor); this.quotaManager.checkNamespaceTableAndRegionQuota(hTableDescriptor.getTableName(), @@ -1332,13 +1378,20 @@ public class HMaster extends HRegionServer implements MasterServices, Server { cpHost.preCreateTable(hTableDescriptor, newRegions); } LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor); - this.service.submit(new CreateTableHandler(this, - this.fileSystemManager, hTableDescriptor, conf, - newRegions, this).prepare()); + + // TODO: We can handle/merge duplicate requests, and differentiate the case of + // TableExistsException by saying if the schema is the same or not. + ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(); + long procId = this.procedureExecutor.submitProcedure( + new CreateTableProcedure(procedureExecutor.getEnvironment(), + hTableDescriptor, newRegions, latch)); + latch.await(); + if (cpHost != null) { cpHost.postCreateTable(hTableDescriptor, newRegions); } + return procId; } /** @@ -1348,12 +1401,13 @@ public class HMaster extends HRegionServer implements MasterServices, Server { */ private void sanityCheckTableDescriptor(final HTableDescriptor htd) throws IOException { final String CONF_KEY = "hbase.table.sanity.checks"; + boolean logWarn = false; if (!conf.getBoolean(CONF_KEY, true)) { - return; + logWarn = true; } String tableVal = htd.getConfigurationValue(CONF_KEY); if (tableVal != null && !Boolean.valueOf(tableVal)) { - return; + logWarn = true; } // check max file size @@ -1363,11 +1417,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server { maxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, maxFileSizeLowerLimit); } if (maxFileSize < conf.getLong("hbase.hregion.max.filesize.limit", maxFileSizeLowerLimit)) { - throw new DoNotRetryIOException("MAX_FILESIZE for table descriptor or " - + "\"hbase.hregion.max.filesize\" (" + maxFileSize - + ") is too small, which might cause over splitting into unmanageable " - + "number of regions. 
Set " + CONF_KEY + " to false at conf or table descriptor " - + "if you want to bypass sanity checks"); + String message = "MAX_FILESIZE for table descriptor or " + + "\"hbase.hregion.max.filesize\" (" + maxFileSize + + ") is too small, which might cause over splitting into unmanageable " + + "number of regions."; + warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null); } // check flush size @@ -1377,72 +1431,81 @@ public class HMaster extends HRegionServer implements MasterServices, Server { flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSizeLowerLimit); } if (flushSize < conf.getLong("hbase.hregion.memstore.flush.size.limit", flushSizeLowerLimit)) { - throw new DoNotRetryIOException("MEMSTORE_FLUSHSIZE for table descriptor or " + String message = "MEMSTORE_FLUSHSIZE for table descriptor or " + "\"hbase.hregion.memstore.flush.size\" ("+flushSize+") is too small, which might cause" - + " very frequent flushing. Set " + CONF_KEY + " to false at conf or table descriptor " - + "if you want to bypass sanity checks"); + + " very frequent flushing."; + warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null); } // check that coprocessors and other specified plugin classes can be loaded try { checkClassLoading(conf, htd); } catch (Exception ex) { - throw new DoNotRetryIOException(ex); + warnOrThrowExceptionForFailure(logWarn, CONF_KEY, ex.getMessage(), null); } // check compression can be loaded try { checkCompression(htd); } catch (IOException e) { - throw new DoNotRetryIOException(e.getMessage(), e); + warnOrThrowExceptionForFailure(logWarn, CONF_KEY, e.getMessage(), e); } // check encryption can be loaded try { checkEncryption(conf, htd); } catch (IOException e) { - throw new DoNotRetryIOException(e.getMessage(), e); + warnOrThrowExceptionForFailure(logWarn, CONF_KEY, e.getMessage(), e); } // check that we have at least 1 CF if (htd.getColumnFamilies().length == 0) { - throw new DoNotRetryIOException("Table should have at least one column family " - + "Set "+CONF_KEY+" at conf or table descriptor if you want to bypass sanity checks"); + String message = "Table should have at least one column family."; + warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null); } for (HColumnDescriptor hcd : htd.getColumnFamilies()) { if (hcd.getTimeToLive() <= 0) { - throw new DoNotRetryIOException("TTL for column family " + hcd.getNameAsString() - + " must be positive. Set " + CONF_KEY + " to false at conf or table descriptor " - + "if you want to bypass sanity checks"); + String message = "TTL for column family " + hcd.getNameAsString() + " must be positive."; + warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null); } // check blockSize if (hcd.getBlocksize() < 1024 || hcd.getBlocksize() > 16 * 1024 * 1024) { - throw new DoNotRetryIOException("Block size for column family " + hcd.getNameAsString() - + " must be between 1K and 16MB Set "+CONF_KEY+" to false at conf or table descriptor " - + "if you want to bypass sanity checks"); + String message = "Block size for column family " + hcd.getNameAsString() + + " must be between 1K and 16MB."; + warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null); } // check versions if (hcd.getMinVersions() < 0) { - throw new DoNotRetryIOException("Min versions for column family " + hcd.getNameAsString() - + " must be positive. 
Set " + CONF_KEY + " to false at conf or table descriptor " - + "if you want to bypass sanity checks"); + String message = "Min versions for column family " + hcd.getNameAsString() + + " must be positive."; + warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null); } // max versions already being checked // check replication scope if (hcd.getScope() < 0) { - throw new DoNotRetryIOException("Replication scope for column family " - + hcd.getNameAsString() + " must be positive. Set " + CONF_KEY + " to false at conf " - + "or table descriptor if you want to bypass sanity checks"); + String message = "Replication scope for column family " + + hcd.getNameAsString() + " must be positive."; + warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null); } // TODO: should we check coprocessors and encryption ? } } + // HBASE-13350 - Helper method to log warning on sanity check failures if checks disabled. + private static void warnOrThrowExceptionForFailure(boolean logWarn, String confKey, + String message, Exception cause) throws IOException { + if (!logWarn) { + throw new DoNotRetryIOException(message + " Set " + confKey + + " to false at conf or table descriptor if you want to bypass sanity checks", cause); + } + LOG.warn(message); + } + private void startActiveMasterManager(int infoPort) throws KeeperException { String backupZNode = ZKUtil.joinZNode( zooKeeper.backupMasterAddressesZNode, serverName.toString()); @@ -1545,44 +1608,29 @@ public class HMaster extends HRegionServer implements MasterServices, Server { RegionCoprocessorHost.testTableCoprocessorAttrs(conf, htd); } - private HRegionInfo[] getHRegionInfos(HTableDescriptor hTableDescriptor, - byte[][] splitKeys) { - long regionId = System.currentTimeMillis(); - HRegionInfo[] hRegionInfos = null; - if (splitKeys == null || splitKeys.length == 0) { - hRegionInfos = new HRegionInfo[]{new HRegionInfo(hTableDescriptor.getTableName(), null, null, - false, regionId)}; - } else { - int numRegions = splitKeys.length + 1; - hRegionInfos = new HRegionInfo[numRegions]; - byte[] startKey = null; - byte[] endKey = null; - for (int i = 0; i < numRegions; i++) { - endKey = (i == splitKeys.length) ? 
null : splitKeys[i]; - hRegionInfos[i] = - new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey, - false, regionId); - startKey = endKey; - } - } - return hRegionInfos; - } - private static boolean isCatalogTable(final TableName tableName) { return tableName.equals(TableName.META_TABLE_NAME); } @Override - public void deleteTable(final TableName tableName) throws IOException { + public long deleteTable(final TableName tableName) throws IOException { checkInitialized(); if (cpHost != null) { cpHost.preDeleteTable(tableName); } LOG.info(getClientIdAuditPrefix() + " delete " + tableName); - this.service.submit(new DeleteTableHandler(tableName, this, this).prepare()); + + // TODO: We can handle/merge duplicate request + ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(); + long procId = this.procedureExecutor.submitProcedure( + new DeleteTableProcedure(procedureExecutor.getEnvironment(), tableName, latch)); + latch.await(); + if (cpHost != null) { cpHost.postDeleteTable(tableName); } + + return procId; } @Override @@ -1592,9 +1640,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server { cpHost.preTruncateTable(tableName); } LOG.info(getClientIdAuditPrefix() + " truncate " + tableName); - TruncateTableHandler handler = new TruncateTableHandler(tableName, this, this, preserveSplits); - handler.prepare(); - handler.process(); + + long procId = this.procedureExecutor.submitProcedure( + new TruncateTableProcedure(procedureExecutor.getEnvironment(), tableName, preserveSplits)); + ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId); + if (cpHost != null) { cpHost.postTruncateTable(tableName); } @@ -1611,8 +1661,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server { return; } } - //TODO: we should process this (and some others) in an executor - new TableAddFamilyHandler(tableName, columnDescriptor, this, this).prepare().process(); + // Execute the operation synchronously - wait for the operation to complete before continuing. + long procId = + this.procedureExecutor.submitProcedure(new AddColumnFamilyProcedure(procedureExecutor + .getEnvironment(), tableName, columnDescriptor)); + ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId); if (cpHost != null) { cpHost.postAddColumn(tableName, columnDescriptor); } @@ -1630,8 +1683,13 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } } LOG.info(getClientIdAuditPrefix() + " modify " + descriptor); - new TableModifyFamilyHandler(tableName, descriptor, this, this) - .prepare().process(); + + // Execute the operation synchronously - wait for the operation to complete before continuing. + long procId = + this.procedureExecutor.submitProcedure(new ModifyColumnFamilyProcedure(procedureExecutor + .getEnvironment(), tableName, descriptor)); + ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId); + if (cpHost != null) { cpHost.postModifyColumn(tableName, descriptor); } @@ -1647,38 +1705,69 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } } LOG.info(getClientIdAuditPrefix() + " delete " + Bytes.toString(columnName)); - new TableDeleteFamilyHandler(tableName, columnName, this, this).prepare().process(); + + // Execute the operation synchronously - wait for the operation to complete before continuing. 
+ long procId = + this.procedureExecutor.submitProcedure(new DeleteColumnFamilyProcedure(procedureExecutor + .getEnvironment(), tableName, columnName)); + ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId); + if (cpHost != null) { cpHost.postDeleteColumn(tableName, columnName); } } @Override - public void enableTable(final TableName tableName) throws IOException { + public long enableTable(final TableName tableName) throws IOException { checkInitialized(); if (cpHost != null) { cpHost.preEnableTable(tableName); } LOG.info(getClientIdAuditPrefix() + " enable " + tableName); - this.service.submit(new EnableTableHandler(this, tableName, - assignmentManager, tableLockManager, false).prepare()); + + // Execute the operation asynchronously - client will check the progress of the operation + final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch(); + long procId = + this.procedureExecutor.submitProcedure(new EnableTableProcedure(procedureExecutor + .getEnvironment(), tableName, false, prepareLatch)); + // Before returning to client, we want to make sure that the table is prepared to be + // enabled (the table is locked and the table state is set). + // + // Note: if the procedure throws exception, we will catch it and rethrow. + prepareLatch.await(); + if (cpHost != null) { cpHost.postEnableTable(tableName); - } + } + + return procId; } @Override - public void disableTable(final TableName tableName) throws IOException { + public long disableTable(final TableName tableName) throws IOException { checkInitialized(); if (cpHost != null) { cpHost.preDisableTable(tableName); } LOG.info(getClientIdAuditPrefix() + " disable " + tableName); - this.service.submit(new DisableTableHandler(this, tableName, - assignmentManager, tableLockManager, false).prepare()); + + // Execute the operation asynchronously - client will check the progress of the operation + final ProcedurePrepareLatch prepareLatch = ProcedurePrepareLatch.createLatch(); + // Execute the operation asynchronously - client will check the progress of the operation + long procId = + this.procedureExecutor.submitProcedure(new DisableTableProcedure(procedureExecutor + .getEnvironment(), tableName, false, prepareLatch)); + // Before returning to client, we want to make sure that the table is prepared to be + // enabled (the table is locked and the table state is set). + // + // Note: if the procedure throws exception, we will catch it and rethrow. 
+ prepareLatch.await(); + if (cpHost != null) { cpHost.postDisableTable(tableName); } + + return procId; } /** @@ -1694,10 +1783,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server { final AtomicReference> result = new AtomicReference>(null); - MetaScannerVisitor visitor = - new MetaScannerVisitorBase() { + MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { @Override - public boolean processRow(Result data) throws IOException { + public boolean visit(Result data) throws IOException { if (data == null || data.size() <= 0) { return true; } @@ -1713,7 +1801,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server { } }; - MetaScanner.metaScan(clusterConnection, visitor, tableName, rowKey, 1); + MetaTableAccessor.scanMeta(clusterConnection, visitor, tableName, rowKey, 1); return result.get(); } @@ -1725,8 +1813,15 @@ public class HMaster extends HRegionServer implements MasterServices, Server { if (cpHost != null) { cpHost.preModifyTable(tableName, descriptor); } + LOG.info(getClientIdAuditPrefix() + " modify " + tableName); - new ModifyTableHandler(tableName, descriptor, this, this).prepare().process(); + + // Execute the operation synchronously - wait for the operation completes before continuing. + long procId = this.procedureExecutor.submitProcedure( + new ModifyTableProcedure(procedureExecutor.getEnvironment(), descriptor)); + + ProcedureSyncWait.waitForProcedureToComplete(procedureExecutor, procId); + if (cpHost != null) { cpHost.postModifyTable(tableName, descriptor); } @@ -1758,39 +1853,42 @@ public class HMaster extends HRegionServer implements MasterServices, Server { this.zooKeeper.backupMasterAddressesZNode); } catch (KeeperException e) { LOG.warn(this.zooKeeper.prefix("Unable to list backup servers"), e); - backupMasterStrings = new ArrayList(0); + backupMasterStrings = null; } - List backupMasters = new ArrayList( - backupMasterStrings.size()); - for (String s: backupMasterStrings) { - try { - byte [] bytes; + + List backupMasters = null; + if (backupMasterStrings != null && !backupMasterStrings.isEmpty()) { + backupMasters = new ArrayList(backupMasterStrings.size()); + for (String s: backupMasterStrings) { try { - bytes = ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode( - this.zooKeeper.backupMasterAddressesZNode, s)); - } catch (InterruptedException e) { - throw new InterruptedIOException(); - } - if (bytes != null) { - ServerName sn; + byte [] bytes; try { - sn = ServerName.parseFrom(bytes); - } catch (DeserializationException e) { - LOG.warn("Failed parse, skipping registering backup server", e); - continue; + bytes = ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode( + this.zooKeeper.backupMasterAddressesZNode, s)); + } catch (InterruptedException e) { + throw new InterruptedIOException(); } - backupMasters.add(sn); + if (bytes != null) { + ServerName sn; + try { + sn = ServerName.parseFrom(bytes); + } catch (DeserializationException e) { + LOG.warn("Failed parse, skipping registering backup server", e); + continue; + } + backupMasters.add(sn); + } + } catch (KeeperException e) { + LOG.warn(this.zooKeeper.prefix("Unable to get information about " + + "backup servers"), e); } - } catch (KeeperException e) { - LOG.warn(this.zooKeeper.prefix("Unable to get information about " + - "backup servers"), e); } + Collections.sort(backupMasters, new Comparator() { + @Override + public int compare(ServerName s1, ServerName s2) { + return s1.getServerName().compareTo(s2.getServerName()); + }}); } - 
Collections.sort(backupMasters, new Comparator() { - @Override - public int compare(ServerName s1, ServerName s2) { - return s1.getServerName().compareTo(s2.getServerName()); - }}); String clusterId = fileSystemManager != null ? fileSystemManager.getClusterId().toString() : null; @@ -1881,6 +1979,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server { return quotaManager; } + @Override + public ProcedureExecutor getMasterProcedureExecutor() { + return procedureExecutor; + } + @Override public ServerName getServerName() { return this.serverName; @@ -2383,6 +2486,26 @@ public class HMaster extends HRegionServer implements MasterServices, Server { mobFileCompactionLock.releaseLockEntry(lockEntry); } } + } + /** + * Queries the state of the {@link LoadBalancerTracker}. If the balancer is not initialized, + * false is returned. + * + * @return The state of the load balancer, or false if the load balancer isn't defined. + */ + public boolean isBalancerOn() { + if (null == loadBalancerTracker) return false; + return loadBalancerTracker.isBalancerOn(); + } + + /** + * Fetch the configured {@link LoadBalancer} class name. If none is set, a default is returned. + * + * @return The name of the {@link LoadBalancer} in use. + */ + public String getLoadBalancerClassName() { + return conf.get(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, LoadBalancerFactory + .getDefaultLoadBalancerClass().getName()); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java index f1606f11ef6..3aeee40d8f2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java @@ -95,7 +95,7 @@ public class HMasterCommandLine extends ServerCommandLine { if (cmd.hasOption("minRegionServers")) { String val = cmd.getOptionValue("minRegionServers"); getConf().setInt("hbase.regions.server.count.min", - Integer.valueOf(val)); + Integer.parseInt(val)); LOG.debug("minRegionServers set to " + val); } @@ -103,7 +103,7 @@ public class HMasterCommandLine extends ServerCommandLine { if (cmd.hasOption("minServers")) { String val = cmd.getOptionValue("minServers"); getConf().setInt("hbase.regions.server.count.min", - Integer.valueOf(val)); + Integer.parseInt(val)); LOG.debug("minServers set to " + val); } @@ -116,13 +116,13 @@ public class HMasterCommandLine extends ServerCommandLine { // master when we are in local/standalone mode. 
Useful testing) if (cmd.hasOption("localRegionServers")) { String val = cmd.getOptionValue("localRegionServers"); - getConf().setInt("hbase.regionservers", Integer.valueOf(val)); + getConf().setInt("hbase.regionservers", Integer.parseInt(val)); LOG.debug("localRegionServers set to " + val); } // How many masters to startup inside this process; useful testing if (cmd.hasOption("masters")) { String val = cmd.getOptionValue("masters"); - getConf().setInt("hbase.masters", Integer.valueOf(val)); + getConf().setInt("hbase.masters", Integer.parseInt(val)); LOG.debug("masters set to " + val); } @@ -156,12 +156,46 @@ public class HMasterCommandLine extends ServerCommandLine { DefaultMetricsSystem.setMiniClusterMode(true); final MiniZooKeeperCluster zooKeeperCluster = new MiniZooKeeperCluster(conf); File zkDataPath = new File(conf.get(HConstants.ZOOKEEPER_DATA_DIR)); - int zkClientPort = conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 0); + + // find out the default client port + int zkClientPort = 0; + + // If the zookeeper client port is specified in server quorum, use it. + String zkserver = conf.get(HConstants.ZOOKEEPER_QUORUM); + if (zkserver != null) { + String[] zkservers = zkserver.split(","); + + if (zkservers.length > 1) { + // In local mode deployment, we have the master + a region server and zookeeper server + // started in the same process. Therefore, we only support one zookeeper server. + String errorMsg = "Could not start ZK with " + zkservers.length + + " ZK servers in local mode deployment. Aborting as clients (e.g. shell) will not " + + "be able to find this ZK quorum."; + System.err.println(errorMsg); + throw new IOException(errorMsg); + } + + String[] parts = zkservers[0].split(":"); + + if (parts.length == 2) { + // the second part is the client port + zkClientPort = Integer.parseInt(parts [1]); + } + } + // If the client port could not be find in server quorum conf, try another conf if (zkClientPort == 0) { - throw new IOException("No config value for " - + HConstants.ZOOKEEPER_CLIENT_PORT); + zkClientPort = conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 0); + // The client port has to be set by now; if not, throw exception. + if (zkClientPort == 0) { + throw new IOException("No config value for " + HConstants.ZOOKEEPER_CLIENT_PORT); + } } zooKeeperCluster.setDefaultClientPort(zkClientPort); + // set the ZK tick time if specified + int zkTickTime = conf.getInt(HConstants.ZOOKEEPER_TICK_TIME, 0); + if (zkTickTime > 0) { + zooKeeperCluster.setTickTime(zkTickTime); + } // login the zookeeper server principal (if using security) ZKUtil.loginServer(conf, "hbase.zookeeper.server.keytab.file", @@ -180,6 +214,7 @@ public class HMasterCommandLine extends ServerCommandLine { throw new IOException(errorMsg); } conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(clientPort)); + // Need to have the zk cluster shutdown when master is shutdown. // Run a subclass that does the zk cluster shutdown on its way out. 
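To make the new local-mode port resolution in HMasterCommandLine concrete, a short sketch with invented values (not from the patch): a single host:port entry in hbase.zookeeper.quorum takes precedence, otherwise the separate client-port property is consulted, and more than one quorum server aborts local-mode startup.

    // Fragment; Configuration, HBaseConfiguration and HConstants as used elsewhere in this patch.
    Configuration conf = HBaseConfiguration.create();

    // Case 1: the quorum entry carries a port; local mode starts ZooKeeper on 2182.
    conf.set(HConstants.ZOOKEEPER_QUORUM, "localhost:2182");

    // Case 2: no port in the quorum entry; the client-port property (2181 here) is used instead.
    // conf.set(HConstants.ZOOKEEPER_QUORUM, "localhost");
    // conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181);

    // More than one server in the quorum makes local-mode startup fail with an IOException.
    // conf.set(HConstants.ZOOKEEPER_QUORUM, "host1:2181,host2:2181");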
int mastersCount = conf.getInt("hbase.masters", 1); @@ -254,7 +289,8 @@ public class HMasterCommandLine extends ServerCommandLine { } } - private static void closeAllRegionServerThreads(List regionservers) { + private static void closeAllRegionServerThreads( + List regionservers) { for(JVMClusterUtil.RegionServerThread t : regionservers){ t.getRegionServer().stop("HMaster Aborted; Bringing down regions servers"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index 29971728010..9f003ec288d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -22,6 +22,8 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; import java.util.List; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -48,6 +50,8 @@ import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; public class MasterCoprocessorHost extends CoprocessorHost { + private static final Log LOG = LogFactory.getLog(MasterCoprocessorHost.class); + /** * Coprocessor environment extension providing access to master related * services. @@ -70,10 +74,16 @@ public class MasterCoprocessorHost private MasterServices masterServices; - MasterCoprocessorHost(final MasterServices services, final Configuration conf) { + public MasterCoprocessorHost(final MasterServices services, final Configuration conf) { super(services); this.conf = conf; this.masterServices = services; + // Log the state of coprocessor loading here; should appear only once or + // twice in the daemon log, depending on HBase version, because there is + // only one MasterCoprocessorHost instance in the master process + boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, + DEFAULT_COPROCESSORS_ENABLED); + LOG.info("System coprocessor loading is " + (coprocessorsEnabled ? 
"enabled" : "disabled")); loadSystemCoprocessors(conf, MASTER_COPROCESSOR_CONF_KEY); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 78e4c119c5e..de28cdc0d2d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -583,10 +583,12 @@ public class MasterFileSystem { Path familyDir = new Path(tableDir, new Path(region.getEncodedName(), Bytes.toString(familyName))); if (fs.delete(familyDir, true) == false) { - throw new IOException("Could not delete family " - + Bytes.toString(familyName) + " from FileSystem for region " - + region.getRegionNameAsString() + "(" + region.getEncodedName() - + ")"); + if (fs.exists(familyDir)) { + throw new IOException("Could not delete family " + + Bytes.toString(familyName) + " from FileSystem for region " + + region.getRegionNameAsString() + "(" + region.getEncodedName() + + ")"); + } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index fcc93db511a..17c1ee3fb28 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.TableState; @@ -46,6 +45,8 @@ import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.procedure.MasterProcedureManager; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureResult; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.ResponseConverter; @@ -54,6 +55,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionReque import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; @@ -92,12 +94,16 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnaps import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; +import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest; @@ -162,6 +168,7 @@ import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Pair; import org.apache.zookeeper.KeeperException; @@ -276,8 +283,8 @@ public class MasterRpcServices extends RSRpcServices throw new ServiceException(ioe); } byte[] encodedRegionName = request.getRegionName().toByteArray(); - long seqId = master.serverManager.getLastFlushedSequenceId(encodedRegionName); - return ResponseConverter.buildGetLastFlushedSequenceIdResponse(seqId); + RegionStoreSequenceIds ids = master.serverManager.getLastFlushedSequenceId(encodedRegionName); + return ResponseConverter.buildGetLastFlushedSequenceIdResponse(ids); } @Override @@ -308,8 +315,9 @@ public class MasterRpcServices extends RSRpcServices master.checkServiceStarted(); InetAddress ia = master.getRemoteInetAddress( request.getPort(), request.getServerStartCode()); - ServerName rs = master.serverManager.regionServerStartup(ia, request.getPort(), - request.getServerStartCode(), request.getServerCurrentTime()); + // if regionserver passed hostname to use, + // then use it instead of doing a reverse DNS lookup + ServerName rs = master.serverManager.regionServerStartup(request, ia); // Send back some config info RegionServerStartupResponse.Builder resp = createConfigurationSubset(); @@ -408,11 +416,11 @@ public class MasterRpcServices extends RSRpcServices HTableDescriptor hTableDescriptor = HTableDescriptor.convert(req.getTableSchema()); byte [][] splitKeys = ProtobufUtil.getSplitKeysArray(req); try { - master.createTable(hTableDescriptor, splitKeys); + long procId = master.createTable(hTableDescriptor, splitKeys); + return CreateTableResponse.newBuilder().setProcId(procId).build(); } catch (IOException ioe) { throw new ServiceException(ioe); } - return CreateTableResponse.newBuilder().build(); } @Override @@ -464,11 +472,11 @@ public class MasterRpcServices extends RSRpcServices public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest request) throws ServiceException { try { - master.deleteTable(ProtobufUtil.toTableName(request.getTableName())); + long procId = 
master.deleteTable(ProtobufUtil.toTableName(request.getTableName())); + return DeleteTableResponse.newBuilder().setProcId(procId).build(); } catch (IOException ioe) { throw new ServiceException(ioe); } - return DeleteTableResponse.newBuilder().build(); } @Override @@ -487,11 +495,11 @@ public class MasterRpcServices extends RSRpcServices public DisableTableResponse disableTable(RpcController controller, DisableTableRequest request) throws ServiceException { try { - master.disableTable(ProtobufUtil.toTableName(request.getTableName())); + long procId = master.disableTable(ProtobufUtil.toTableName(request.getTableName())); + return DisableTableResponse.newBuilder().setProcId(procId).build(); } catch (IOException ioe) { throw new ServiceException(ioe); } - return DisableTableResponse.newBuilder().build(); } @Override @@ -573,11 +581,11 @@ public class MasterRpcServices extends RSRpcServices public EnableTableResponse enableTable(RpcController controller, EnableTableRequest request) throws ServiceException { try { - master.enableTable(ProtobufUtil.toTableName(request.getTableName())); + long procId = master.enableTable(ProtobufUtil.toTableName(request.getTableName())); + return EnableTableResponse.newBuilder().setProcId(procId).build(); } catch (IOException ioe) { throw new ServiceException(ioe); } - return EnableTableResponse.newBuilder().build(); } @Override @@ -964,6 +972,44 @@ public class MasterRpcServices extends RSRpcServices } } + @Override + public GetProcedureResultResponse getProcedureResult(RpcController controller, + GetProcedureResultRequest request) throws ServiceException { + LOG.debug("Checking to see if procedure is done procId=" + request.getProcId()); + try { + master.checkInitialized(); + GetProcedureResultResponse.Builder builder = GetProcedureResultResponse.newBuilder(); + + Pair v = master.getMasterProcedureExecutor() + .getResultOrProcedure(request.getProcId()); + if (v.getFirst() != null) { + ProcedureResult result = v.getFirst(); + builder.setState(GetProcedureResultResponse.State.FINISHED); + builder.setStartTime(result.getStartTime()); + builder.setLastUpdate(result.getLastUpdate()); + if (result.isFailed()) { + builder.setException(result.getException().convert()); + } + if (result.hasResultData()) { + builder.setResult(ByteStringer.wrap(result.getResult())); + } + master.getMasterProcedureExecutor().removeResult(request.getProcId()); + } else { + Procedure proc = v.getSecond(); + if (proc == null) { + builder.setState(GetProcedureResultResponse.State.NOT_FOUND); + } else { + builder.setState(GetProcedureResultResponse.State.RUNNING); + builder.setStartTime(proc.getStartTime()); + builder.setLastUpdate(proc.getLastUpdate()); + } + } + return builder.build(); + } catch (IOException e) { + throw new ServiceException(e); + } + } + @Override public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController c, ListNamespaceDescriptorsRequest request) throws ServiceException { @@ -984,8 +1030,9 @@ public class MasterRpcServices extends RSRpcServices ListTableDescriptorsByNamespaceRequest request) throws ServiceException { try { ListTableDescriptorsByNamespaceResponse.Builder b = - ListTableDescriptorsByNamespaceResponse.newBuilder(); - for(HTableDescriptor htd: master.listTableDescriptorsByNamespace(request.getNamespaceName())) { + ListTableDescriptorsByNamespaceResponse.newBuilder(); + for (HTableDescriptor htd : master + .listTableDescriptorsByNamespace(request.getNamespaceName())) { b.addTableSchema(htd.convert()); } return b.build(); @@ -1385,7 +1432,7 
@@ public class MasterRpcServices extends RSRpcServices if (!hcd.isMobEnabled()) { LOG.error("Column family " + hcd.getName() + " is not a mob column family"); throw new DoNotRetryIOException("Column family " + hcd.getName() - + " is not a mob column family"); + + " is not a mob column family"); } compactedColumns.add(hcd); } @@ -1400,7 +1447,7 @@ public class MasterRpcServices extends RSRpcServices if (compactedColumns.isEmpty()) { LOG.error("No mob column families are assigned in the mob file compaction"); throw new DoNotRetryIOException( - "No mob column families are assigned in the mob file compaction"); + "No mob column families are assigned in the mob file compaction"); } if (request.hasMajor() && request.getMajor()) { isForceAllFiles = true; @@ -1408,11 +1455,19 @@ public class MasterRpcServices extends RSRpcServices String familyLogMsg = (family != null) ? Bytes.toString(family) : ""; if (LOG.isTraceEnabled()) { LOG.trace("User-triggered mob file compaction requested for table: " - + tableName.getNameAsString() + " for column family: " + familyLogMsg); + + tableName.getNameAsString() + " for column family: " + familyLogMsg); } master.mobFileCompactThread.requestMobFileCompaction(master.getConfiguration(), - master.getFileSystem(), tableName, compactedColumns, - master.getTableLockManager(), isForceAllFiles); + master.getFileSystem(), tableName, compactedColumns, + master.getTableLockManager(), isForceAllFiles); return CompactRegionResponse.newBuilder().build(); } + + @Override + public IsBalancerEnabledResponse isBalancerEnabled(RpcController controller, + IsBalancerEnabledRequest request) throws ServiceException { + IsBalancerEnabledResponse.Builder response = IsBalancerEnabledResponse.newBuilder(); + response.setEnabled(master.isBalancerOn()); + return response.build(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java index 63f311960cd..6153139bd91 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java @@ -31,6 +31,8 @@ import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.quotas.MasterQuotaManager; @@ -81,6 +83,11 @@ public interface MasterServices extends Server { */ MasterQuotaManager getMasterQuotaManager(); + /** + * @return Master's instance of {@link ProcedureExecutor} + */ + ProcedureExecutor getMasterProcedureExecutor(); + /** * Check table is modifiable; i.e. exists and is offline. * @param tableName Name of table to check. @@ -98,7 +105,7 @@ public interface MasterServices extends Server { * @param splitKeys Starting row keys for the initial table regions. If null * a single region is created. 
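The signature change just below (and the matching ones for deleteTable, enableTable and disableTable further on) turns these DDL calls into asynchronous submissions: the master returns the id of the procedure it queued, and the caller polls getProcedureResult, added above in MasterRpcServices, until the state is no longer RUNNING. A minimal sketch of that polling contract, using invented types in place of the protobuf request/response messages:

// Invented stand-in for the GetProcedureResultResponse states.
enum ProcState { NOT_FOUND, RUNNING, FINISHED }

class ProcedurePollSketch {
  // Invented client-side view of the two calls involved.
  interface DdlClient {
    long createTable(String tableName);        // submits and returns a procedure id
    ProcState getProcedureResult(long procId); // corresponds to the new master RPC
  }

  // Poll until the submitted procedure finishes or is no longer known.
  static ProcState waitForProcedure(DdlClient client, long procId, long sleepMillis)
      throws InterruptedException {
    ProcState state = client.getProcedureResult(procId);
    while (state == ProcState.RUNNING) {
      Thread.sleep(sleepMillis);
      state = client.getProcedureResult(procId);
    }
    return state;
  }
}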
*/ - void createTable(HTableDescriptor desc, byte[][] splitKeys) + long createTable(HTableDescriptor desc, byte[][] splitKeys) throws IOException; /** @@ -106,7 +113,7 @@ public interface MasterServices extends Server { * @param tableName The table name * @throws IOException */ - void deleteTable(final TableName tableName) throws IOException; + long deleteTable(final TableName tableName) throws IOException; /** * Truncate a table @@ -130,14 +137,14 @@ public interface MasterServices extends Server { * @param tableName The table name * @throws IOException */ - void enableTable(final TableName tableName) throws IOException; + long enableTable(final TableName tableName) throws IOException; /** * Disable an existing table * @param tableName The table name * @throws IOException */ - void disableTable(final TableName tableName) throws IOException; + long disableTable(final TableName tableName) throws IOException; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java index df61b458d68..ba76115c3da 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.master.RegionState.State; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.MultiHConnection; @@ -56,7 +56,7 @@ public class RegionStateStore { /** The delimiter for meta columns for replicaIds > 0 */ protected static final char META_REPLICA_ID_DELIMITER = '_'; - private volatile HRegion metaRegion; + private volatile Region metaRegion; private volatile boolean initialized; private MultiHConnection multiHConnection; private final Server server; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java index e5214ca6f24..d1fffbe0226 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java @@ -426,7 +426,9 @@ public class RegionStates { if (oldServerName == null) { oldServerName = oldAssignments.remove(encodedName); } - if (oldServerName != null && serverHoldings.containsKey(oldServerName)) { + if (oldServerName != null + && !oldServerName.equals(serverName) + && serverHoldings.containsKey(oldServerName)) { LOG.info("Offlined " + hri.getShortNameToLog() + " from " + oldServerName); removeFromServerHoldings(oldServerName, hri); } @@ -574,14 +576,16 @@ public class RegionStates { // Offline all regions on this server not already in transition. List rits = new ArrayList(); Set regionsToCleanIfNoMetaEntry = new HashSet(); + // Offline regions outside the loop and synchronized block to avoid + // ConcurrentModificationException and deadlock in case of meta anassigned, + // but RegionState a blocked. 
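Put differently: the regions to offline are only collected while the monitor is held, and the actual regionOffline() calls happen after it is released, so the iteration can never see a concurrent modification and a regionOffline() that blocks (for example while meta is unassigned) cannot deadlock against threads waiting on this monitor. Reduced to a standalone sketch with invented names, the pattern is:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class CollectThenActSketch {
  private final Map<String, Set<String>> holdingsByServer = new HashMap<>();

  void serverOffline(String serverName) {
    Set<String> toOffline = new HashSet<>();
    synchronized (this) {
      Set<String> assigned = holdingsByServer.get(serverName);
      if (assigned != null) {
        // Only collect here; no state transitions while the monitor is held.
        toOffline.addAll(assigned);
      }
    }
    for (String region : toOffline) {
      // May block or take other locks; the monitor is no longer held.
      offline(region);
    }
  }

  private void offline(String region) {
    // placeholder for the real region state transition
  }
}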
+ Set regionsToOffline = new HashSet(); synchronized (this) { Set assignedRegions = serverHoldings.get(sn); if (assignedRegions == null) { assignedRegions = new HashSet(); } - // Offline regions outside the loop to avoid ConcurrentModificationException - Set regionsToOffline = new HashSet(); for (HRegionInfo region : assignedRegions) { // Offline open regions, no need to offline if SPLIT/MERGED/OFFLINE if (isRegionOnline(region)) { @@ -618,13 +622,13 @@ public class RegionStates { } } } - - for (HRegionInfo hri : regionsToOffline) { - regionOffline(hri); - } - this.notifyAll(); } + + for (HRegionInfo hri : regionsToOffline) { + regionOffline(hri); + } + cleanIfNoMetaEntry(regionsToCleanIfNoMetaEntry); return rits; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java index 69c29fdfa40..1ed251465fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java @@ -29,8 +29,8 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; -import java.util.SortedMap; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.CopyOnWriteArrayList; @@ -38,6 +38,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ClockOutOfSyncException; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionLoad; @@ -45,9 +46,11 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.YouAreDeadException; +import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer; import org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler; import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler; @@ -59,6 +62,9 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; @@ -71,6 +77,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; import 
com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.ByteString; import com.google.protobuf.ServiceException; /** @@ -114,9 +121,13 @@ public class ServerManager { // Set if we are to shutdown the cluster. private volatile boolean clusterShutdown = false; - private final SortedMap flushedSequenceIdByRegion = + private final ConcurrentNavigableMap flushedSequenceIdByRegion = new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR); + private final ConcurrentNavigableMap> + storeFlushedSequenceIdsByRegion = + new ConcurrentSkipListMap>(Bytes.BYTES_COMPARATOR); + /** Map of registered servers to their current load */ private final ConcurrentHashMap onlineServers = new ConcurrentHashMap(); @@ -229,16 +240,13 @@ public class ServerManager { /** * Let the server manager know a new regionserver has come online - * @param ia The remote address - * @param port The remote port - * @param serverStartcode - * @param serverCurrentTime The current time of the region server in ms + * @param request the startup request + * @param ia the InetAddress from which request is received * @return The ServerName we know this server as. * @throws IOException */ - ServerName regionServerStartup(final InetAddress ia, final int port, - final long serverStartcode, long serverCurrentTime) - throws IOException { + ServerName regionServerStartup(RegionServerStartupRequest request, InetAddress ia) + throws IOException { // Test for case where we get a region startup message from a regionserver // that has been quickly restarted but whose znode expiration handler has // not yet run, or from a server whose fail we are currently processing. @@ -246,8 +254,12 @@ public class ServerManager { // is, reject the server and trigger its expiration. The next time it comes // in, it should have been removed from serverAddressToServerInfo and queued // for processing by ProcessServerShutdown. - ServerName sn = ServerName.valueOf(ia.getHostName(), port, serverStartcode); - checkClockSkew(sn, serverCurrentTime); + + final String hostname = request.hasUseThisHostnameInstead() ? + request.getUseThisHostnameInstead() :ia.getHostName(); + ServerName sn = ServerName.valueOf(hostname, request.getPort(), + request.getServerStartCode()); + checkClockSkew(sn, request.getServerCurrentTime()); checkIsDead(sn, "STARTUP"); if (!checkAndRecordNewServer(sn, ServerLoad.EMPTY_SERVERLOAD)) { LOG.warn("THIS SHOULD NOT HAPPEN, RegionServerStartup" @@ -256,6 +268,18 @@ public class ServerManager { return sn; } + private ConcurrentNavigableMap getOrCreateStoreFlushedSequenceId( + byte[] regionName) { + ConcurrentNavigableMap storeFlushedSequenceId = + storeFlushedSequenceIdsByRegion.get(regionName); + if (storeFlushedSequenceId != null) { + return storeFlushedSequenceId; + } + storeFlushedSequenceId = new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR); + ConcurrentNavigableMap alreadyPut = + storeFlushedSequenceIdsByRegion.putIfAbsent(regionName, storeFlushedSequenceId); + return alreadyPut == null ? 
storeFlushedSequenceId : alreadyPut; + } /** * Updates last flushed sequence Ids for the regions on server sn * @param sn @@ -267,18 +291,25 @@ public class ServerManager { byte[] encodedRegionName = Bytes.toBytes(HRegionInfo.encodeRegionName(entry.getKey())); Long existingValue = flushedSequenceIdByRegion.get(encodedRegionName); long l = entry.getValue().getCompleteSequenceId(); - if (existingValue != null) { - if (l != -1 && l < existingValue) { - LOG.warn("RegionServer " + sn + - " indicates a last flushed sequence id (" + entry.getValue() + - ") that is less than the previous last flushed sequence id (" + - existingValue + ") for region " + - Bytes.toString(entry.getKey()) + " Ignoring."); - - continue; // Don't let smaller sequence ids override greater sequence ids. + // Don't let smaller sequence ids override greater sequence ids. + if (existingValue == null || (l != HConstants.NO_SEQNUM && l > existingValue)) { + flushedSequenceIdByRegion.put(encodedRegionName, l); + } else if (l != HConstants.NO_SEQNUM && l < existingValue) { + LOG.warn("RegionServer " + sn + " indicates a last flushed sequence id (" + + l + ") that is less than the previous last flushed sequence id (" + + existingValue + ") for region " + Bytes.toString(entry.getKey()) + " Ignoring."); + } + ConcurrentNavigableMap storeFlushedSequenceId = + getOrCreateStoreFlushedSequenceId(encodedRegionName); + for (StoreSequenceId storeSeqId : entry.getValue().getStoreCompleteSequenceId()) { + byte[] family = storeSeqId.getFamilyName().toByteArray(); + existingValue = storeFlushedSequenceId.get(family); + l = storeSeqId.getSequenceId(); + // Don't let smaller sequence ids override greater sequence ids. + if (existingValue == null || (l != HConstants.NO_SEQNUM && l > existingValue.longValue())) { + storeFlushedSequenceId.put(family, l); } } - flushedSequenceIdByRegion.put(encodedRegionName, l); } } @@ -417,12 +448,20 @@ public class ServerManager { this.rsAdmins.remove(serverName); } - public long getLastFlushedSequenceId(byte[] encodedRegionName) { - long seqId = -1L; - if (flushedSequenceIdByRegion.containsKey(encodedRegionName)) { - seqId = flushedSequenceIdByRegion.get(encodedRegionName); + public RegionStoreSequenceIds getLastFlushedSequenceId(byte[] encodedRegionName) { + RegionStoreSequenceIds.Builder builder = RegionStoreSequenceIds.newBuilder(); + Long seqId = flushedSequenceIdByRegion.get(encodedRegionName); + builder.setLastFlushedSequenceId(seqId != null ? seqId.longValue() : HConstants.NO_SEQNUM); + Map storeFlushedSequenceId = + storeFlushedSequenceIdsByRegion.get(encodedRegionName); + if (storeFlushedSequenceId != null) { + for (Map.Entry entry : storeFlushedSequenceId.entrySet()) { + builder.addStoreSequenceId(StoreSequenceId.newBuilder() + .setFamilyName(ByteString.copyFrom(entry.getKey())) + .setSequenceId(entry.getValue().longValue()).build()); + } } - return seqId; + return builder.build(); } /** @@ -768,6 +807,27 @@ public class ServerManager { return sendRegionClose(server, region, null); } + /** + * Sends a WARMUP RPC to the specified server to warmup the specified region. + *

      + * A region server could reject the close request because it either does not + * have the specified region or the region is being split. + * @param server server to warmup a region + * @param region region to warmup + */ + public void sendRegionWarmup(ServerName server, + HRegionInfo region) { + if (server == null) return; + try { + AdminService.BlockingInterface admin = getRsAdmin(server); + ProtobufUtil.warmupRegion(admin, region); + } catch (IOException e) { + LOG.error("Received exception in RPC for warmup server:" + + server + "region: " + region + + "exception: " + e); + } + } + /** * Contacts a region server and waits up to timeout ms * to close the region. This bypasses the active hmaster. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java index 9ac208498be..c93ecf6b639 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java @@ -149,8 +149,8 @@ public class SplitLogManager { Set failedDeletions = Collections.synchronizedSet(new HashSet()); SplitLogManagerDetails details = new SplitLogManagerDetails(tasks, master, failedDeletions, serverName); - coordination.init(); coordination.setDetails(details); + coordination.init(); // Determine recovery mode } this.unassignedTimeout = diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java index 0f0922d3d88..cfaeb98fe47 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java @@ -240,8 +240,10 @@ public abstract class TableLockManager { return; } LOG.debug("Table is locked by " + - String.format("[tableName=%s, lockOwner=%s, threadId=%s, " + - "purpose=%s, isShared=%s, createTime=%s]", Bytes.toString(data.getTableName().toByteArray()), + String.format("[tableName=%s:%s, lockOwner=%s, threadId=%s, " + + "purpose=%s, isShared=%s, createTime=%s]", + data.getTableName().getNamespace().toStringUtf8(), + data.getTableName().getQualifier().toStringUtf8(), ProtobufUtil.toServerName(data.getLockOwner()), data.getThreadId(), data.getPurpose(), data.getIsShared(), data.getCreateTime())); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java index 323ccee2ea3..02912b9ad5e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java @@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.constraint.ConstraintException; -import org.apache.hadoop.hbase.master.handler.CreateTableHandler; +import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.util.Bytes; @@ -86,31 +86,26 @@ public class TableNamespaceManager { public void start() throws IOException { if (!MetaTableAccessor.tableExists(masterServices.getConnection(), - 
TableName.NAMESPACE_TABLE_NAME)) { + TableName.NAMESPACE_TABLE_NAME)) { LOG.info("Namespace table not found. Creating..."); createNamespaceTable(masterServices); } try { - // Wait for the namespace table to be assigned. - // If timed out, we will move ahead without initializing it. - // So that it should be initialized later on lazily. + // Wait for the namespace table to be initialized. long startTime = EnvironmentEdgeManager.currentTime(); int timeout = conf.getInt(NS_INIT_TIMEOUT, DEFAULT_NS_INIT_TIMEOUT); - while (!(isTableAssigned() && isTableEnabled())) { + while (!isTableAvailableAndInitialized()) { if (EnvironmentEdgeManager.currentTime() - startTime + 100 > timeout) { // We can't do anything if ns is not online. - throw new IOException("Timedout " + timeout + "ms waiting for namespace table to " + - "be assigned and enabled: " + getTableState()); + throw new IOException("Timedout " + timeout + "ms waiting for namespace table to " + + "be assigned and enabled: " + getTableState()); } Thread.sleep(100); } } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); + throw (InterruptedIOException) new InterruptedIOException().initCause(e); } - - // initialize namespace table - isTableAvailableAndInitialized(); } private synchronized Table getNamespaceTable() throws IOException { @@ -236,18 +231,15 @@ public class TableNamespaceManager { } private void createNamespaceTable(MasterServices masterServices) throws IOException { - HRegionInfo newRegions[] = new HRegionInfo[]{ + HRegionInfo[] newRegions = new HRegionInfo[]{ new HRegionInfo(HTableDescriptor.NAMESPACE_TABLEDESC.getTableName(), null, null)}; - //we need to create the table this way to bypass - //checkInitialized - masterServices.getExecutorService() - .submit(new CreateTableHandler(masterServices, - masterServices.getMasterFileSystem(), - HTableDescriptor.NAMESPACE_TABLEDESC, - masterServices.getConfiguration(), - newRegions, - masterServices).prepare()); + // we need to create the table this way to bypass checkInitialized + masterServices.getMasterProcedureExecutor() + .submitProcedure(new CreateTableProcedure( + masterServices.getMasterProcedureExecutor().getEnvironment(), + HTableDescriptor.NAMESPACE_TABLEDESC, + newRegions)); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java index 5daa823368e..c9daa0dcadc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.TableState; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java index 81e79d03a4a..035973d9bf2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/LoadBalancerFactory.java @@ -29,6 
+29,15 @@ import org.apache.hadoop.util.ReflectionUtils; @InterfaceAudience.Private public class LoadBalancerFactory { + /** + * The default {@link LoadBalancer} class. + * + * @return The Class for the default {@link LoadBalancer}. + */ + public static Class getDefaultLoadBalancerClass() { + return StochasticLoadBalancer.class; + } + /** * Create a loadbalancer from the given conf. * @param conf @@ -38,7 +47,7 @@ public class LoadBalancerFactory { // Create the balancer Class balancerKlass = - conf.getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, StochasticLoadBalancer.class, + conf.getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, getDefaultLoadBalancerClass(), LoadBalancer.class); return ReflectionUtils.newInstance(balancerKlass, conf); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java index b60733e7ffb..b8845441475 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventType; -import org.apache.hadoop.hbase.ipc.RequestContext; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; @@ -104,9 +104,8 @@ public class CreateTableHandler extends EventHandler { // If we are creating the table in service to an RPC request, record the // active user for later, so proper permissions will be applied to the // new table by the AccessController if it is active - if (RequestContext.isInRequestContext()) { - this.activeUser = RequestContext.getRequestUser(); - } else { + this.activeUser = RpcServer.getRequestUser(); + if (this.activeUser == null) { this.activeUser = UserProvider.instantiate(conf).getCurrent(); } } catch (InterruptedException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java index 0664a55f763..cbff5dda67b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.backup.HFileArchiver; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -126,12 +127,13 @@ public class DeleteTableHandler extends TableEventHandler { LOG.debug("Removing '" + tableName + "' from region states."); am.getRegionStates().tableDeleted(tableName); - // 5. If entry for this table states, remove it. + + // 5.Clean any remaining rows for this table. + cleanAnyRemainingRows(); + + // 6. If entry for this table states, remove it. 
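The cleanup added as step 5 is implemented by cleanAnyRemainingRows() just below; stripped of logging it is a plain scan-and-delete over hbase:meta, roughly as in this standalone sketch (the Scan is taken as a parameter here, since this patch also changes MetaTableAccessor.getScanForTableName to require a connection):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

class MetaRowCleanupSketch {
  // Remove any rows for the dropped table that are still present in hbase:meta.
  static void cleanRemainingRows(Connection connection, Scan tableScan) throws IOException {
    try (Table meta = connection.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(tableScan)) {
      List<Delete> deletes = new ArrayList<>();
      for (Result result : scanner) {
        deletes.add(new Delete(result.getRow()));
      }
      if (!deletes.isEmpty()) {
        meta.delete(deletes);
      }
    }
  }
}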
LOG.debug("Marking '" + tableName + "' as deleted."); am.getTableStateManager().setDeletedTable(tableName); - - // 6.Clean any remaining rows for this table. - cleanAnyRemainingRows(); } /** @@ -141,9 +143,10 @@ public class DeleteTableHandler extends TableEventHandler { * @throws IOException */ private void cleanAnyRemainingRows() throws IOException { - Scan tableScan = MetaTableAccessor.getScanForTableName(tableName); + ClusterConnection connection = this.masterServices.getConnection(); + Scan tableScan = MetaTableAccessor.getScanForTableName(connection, tableName); try (Table metaTable = - this.masterServices.getConnection().getTable(TableName.META_TABLE_NAME)) { + connection.getTable(TableName.META_TABLE_NAME)) { List deletes = new ArrayList(); try (ResultScanner resScanner = metaTable.getScanner(tableScan)) { for (Result result : resScanner) { @@ -153,6 +156,9 @@ public class DeleteTableHandler extends TableEventHandler { if (!deletes.isEmpty()) { LOG.warn("Deleting some vestigal " + deletes.size() + " rows of " + this.tableName + " from " + TableName.META_TABLE_NAME); + if (LOG.isDebugEnabled()) { + for (Delete d: deletes) LOG.debug("Purging " + d); + } metaTable.delete(deletes); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java index 993edb274b0..ee774196319 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java @@ -217,12 +217,19 @@ public class DisableTableHandler extends EventHandler { long startTime = System.currentTimeMillis(); long remaining = timeout; List regions = null; + long lastLogTime = startTime; while (!server.isStopped() && remaining > 0) { Thread.sleep(waitingTimeForEvents); regions = assignmentManager.getRegionStates().getRegionsOfTable(tableName); - LOG.debug("Disable waiting until done; " + remaining + " ms remaining; " + regions); + long now = System.currentTimeMillis(); + // Don't log more than once every ten seconds. Its obnoxious. And only log table regions + // if we are waiting a while for them to go down... + if (LOG.isDebugEnabled() && ((now - lastLogTime) > 10000)) { + lastLogTime = now; + LOG.debug("Disable waiting until done; " + remaining + " ms remaining; " + regions); + } if (regions.isEmpty()) break; - remaining = timeout - (System.currentTimeMillis() - startTime); + remaining = timeout - (now - startTime); } return regions != null && regions.isEmpty(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java index c4969be9065..c7145fdbf2e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java @@ -92,11 +92,7 @@ public class EnableTableHandler extends EventHandler { try { // Check if table exists if (!MetaTableAccessor.tableExists(this.server.getConnection(), tableName)) { - // retainAssignment is true only during recovery. 
In normal case it is false - if (!this.skipTableStateCheck) { - throw new TableNotFoundException(tableName); - } - this.assignmentManager.getTableStateManager().setDeletedTable(tableName); + throw new TableNotFoundException(tableName); } // There could be multiple client requests trying to disable or enable diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java index b35de6a5cac..9b82701747c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; @InterfaceAudience.Private public class ModifyTableHandler extends TableEventHandler { @@ -88,6 +89,10 @@ public class ModifyTableHandler extends TableEventHandler { this.htd.getRegionReplication(), oldDescriptor.getRegionReplication(), this.htd.getTableName()); + // Setup replication for region replicas if needed + if (htd.getRegionReplication() > 1 && oldDescriptor.getRegionReplication() <= 1) { + ServerRegionReplicaUtil.setupRegionReplicaReplication(server.getConfiguration()); + } if (cpHost != null) { cpHost.postModifyTableHandler(this.tableName, this.htd); } @@ -97,9 +102,9 @@ public class ModifyTableHandler extends TableEventHandler { TableName table) throws IOException { if (newReplicaCount >= oldReplicaCount) return; Set tableRows = new HashSet(); - Scan scan = MetaTableAccessor.getScanForTableName(table); - scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); Connection connection = this.masterServices.getConnection(); + Scan scan = MetaTableAccessor.getScanForTableName(connection, table); + scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { ResultScanner resScanner = metaTable.getScanner(scan); for (Result result : resScanner) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java index 7b5c5c53b54..3bbef0afc2e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java @@ -21,11 +21,11 @@ package org.apache.hadoop.hbase.master.handler; import java.io.IOException; import java.util.List; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.util.Bytes; /** - * Handles adding a new family to an existing 
table. + * Handles Deleting a column family from an existing table. */ @InterfaceAudience.Private public class TableDeleteFamilyHandler extends TableEventHandler { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java new file mode 100644 index 00000000000..377ccb57632 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AddColumnFamilyProcedure.java @@ -0,0 +1,407 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.security.PrivilegedExceptionAction; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.executor.EventType; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyState; +import org.apache.hadoop.security.UserGroupInformation; + +/** + * The procedure to add a column family to an existing table. 
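All of the new procedure classes in this patch share the same shape: an enum of states, one executeFromState step per invocation, and a matching rollbackState entry for each step. A minimal sketch of that shape with invented names (the real base class, StateMachineProcedure, additionally handles persistence, rollback ordering and locking):

class AddColumnFamilySketch {
  enum State { PREPARE, PRE_OPERATION, UPDATE_TABLE_DESCRIPTOR, POST_OPERATION, REOPEN_ALL_REGIONS }
  enum Flow { HAS_MORE_STATE, NO_MORE_STATE }

  private State state = State.PREPARE;

  // Each call performs exactly one step and records the next state, so the
  // machine can be persisted between steps and resumed after a master restart.
  Flow executeOneStep() {
    switch (state) {
      case PREPARE:
        state = State.PRE_OPERATION;
        return Flow.HAS_MORE_STATE;
      case PRE_OPERATION:
        state = State.UPDATE_TABLE_DESCRIPTOR;
        return Flow.HAS_MORE_STATE;
      case UPDATE_TABLE_DESCRIPTOR:
        state = State.POST_OPERATION;
        return Flow.HAS_MORE_STATE;
      case POST_OPERATION:
        state = State.REOPEN_ALL_REGIONS;
        return Flow.HAS_MORE_STATE;
      case REOPEN_ALL_REGIONS:
        return Flow.NO_MORE_STATE;
      default:
        throw new UnsupportedOperationException("unhandled state=" + state);
    }
  }
}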
+ */ +@InterfaceAudience.Private +public class AddColumnFamilyProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(AddColumnFamilyProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + + private TableName tableName; + private HTableDescriptor unmodifiedHTableDescriptor; + private HColumnDescriptor cfDescriptor; + private UserGroupInformation user; + + private List regionInfoList; + private Boolean traceEnabled; + + public AddColumnFamilyProcedure() { + this.unmodifiedHTableDescriptor = null; + this.regionInfoList = null; + this.traceEnabled = null; + } + + public AddColumnFamilyProcedure( + final MasterProcedureEnv env, + final TableName tableName, + final HColumnDescriptor cfDescriptor) throws IOException { + this.tableName = tableName; + this.cfDescriptor = cfDescriptor; + this.user = env.getRequestUser().getUGI(); + this.unmodifiedHTableDescriptor = null; + this.regionInfoList = null; + this.traceEnabled = null; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, final AddColumnFamilyState state) { + if (isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + + try { + switch (state) { + case ADD_COLUMN_FAMILY_PREPARE: + prepareAdd(env); + setNextState(AddColumnFamilyState.ADD_COLUMN_FAMILY_PRE_OPERATION); + break; + case ADD_COLUMN_FAMILY_PRE_OPERATION: + preAdd(env, state); + setNextState(AddColumnFamilyState.ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR); + break; + case ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR: + updateTableDescriptor(env); + setNextState(AddColumnFamilyState.ADD_COLUMN_FAMILY_POST_OPERATION); + break; + case ADD_COLUMN_FAMILY_POST_OPERATION: + postAdd(env, state); + setNextState(AddColumnFamilyState.ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS); + break; + case ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS: + reOpenAllRegionsIfTableIsOnline(env); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + } catch (InterruptedException|IOException e) { + LOG.warn("Error trying to add the column family" + getColumnFamilyName() + " to the table " + + tableName + " (in state=" + state + ")", e); + + setFailure("master-add-columnfamily", e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final AddColumnFamilyState state) + throws IOException { + if (isTraceEnabled()) { + LOG.trace(this + " rollback state=" + state); + } + try { + switch (state) { + case ADD_COLUMN_FAMILY_REOPEN_ALL_REGIONS: + break; // Nothing to undo. + case ADD_COLUMN_FAMILY_POST_OPERATION: + // TODO-MAYBE: call the coprocessor event to undo? + break; + case ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR: + restoreTableDescriptor(env); + break; + case ADD_COLUMN_FAMILY_PRE_OPERATION: + // TODO-MAYBE: call the coprocessor event to undo? + break; + case ADD_COLUMN_FAMILY_PREPARE: + break; // nothing to do + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + } catch (IOException e) { + // This will be retried. Unless there is a bug in the code, + // this should be just a "temporary error" (e.g. 
network down) + LOG.warn("Failed rollback attempt step " + state + " for adding the column family" + + getColumnFamilyName() + " to the table " + tableName, e); + throw e; + } + } + + @Override + protected AddColumnFamilyState getState(final int stateId) { + return AddColumnFamilyState.valueOf(stateId); + } + + @Override + protected int getStateId(final AddColumnFamilyState state) { + return state.getNumber(); + } + + @Override + protected AddColumnFamilyState getInitialState() { + return AddColumnFamilyState.ADD_COLUMN_FAMILY_PREPARE; + } + + @Override + protected void setNextState(AddColumnFamilyState state) { + if (aborted.get()) { + setAbortFailure("add-columnfamily", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (!env.isInitialized()) return false; + return env.getProcedureQueue().tryAcquireTableWrite( + tableName, + EventType.C_M_ADD_FAMILY.toString()); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableWrite(tableName); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + MasterProcedureProtos.AddColumnFamilyStateData.Builder addCFMsg = + MasterProcedureProtos.AddColumnFamilyStateData.newBuilder() + .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user)) + .setTableName(ProtobufUtil.toProtoTableName(tableName)) + .setColumnfamilySchema(cfDescriptor.convert()); + if (unmodifiedHTableDescriptor != null) { + addCFMsg.setUnmodifiedTableSchema(unmodifiedHTableDescriptor.convert()); + } + + addCFMsg.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + MasterProcedureProtos.AddColumnFamilyStateData addCFMsg = + MasterProcedureProtos.AddColumnFamilyStateData.parseDelimitedFrom(stream); + user = MasterProcedureUtil.toUserInfo(addCFMsg.getUserInfo()); + tableName = ProtobufUtil.toTableName(addCFMsg.getTableName()); + cfDescriptor = HColumnDescriptor.convert(addCFMsg.getColumnfamilySchema()); + if (addCFMsg.hasUnmodifiedTableSchema()) { + unmodifiedHTableDescriptor = HTableDescriptor.convert(addCFMsg.getUnmodifiedTableSchema()); + } + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (table="); + sb.append(tableName); + sb.append(", columnfamily="); + if (cfDescriptor != null) { + sb.append(getColumnFamilyName()); + } else { + sb.append("Unknown"); + } + sb.append(") user="); + sb.append(user); + } + + @Override + public TableName getTableName() { + return tableName; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.EDIT; + } + + /** + * Action before any real action of adding column family. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void prepareAdd(final MasterProcedureEnv env) throws IOException { + // Checks whether the table is allowed to be modified. + MasterDDLOperationHelper.checkTableModifiable(env, tableName); + + // In order to update the descriptor, we need to retrieve the old descriptor for comparison. 
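The prepare step that follows boils down to: load the current descriptor and fail fast if it is missing or already contains the family, so the procedure never advances past PREPARE for a conflicting request. As a standalone sketch (helper class and method names invented):

import java.io.IOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;

class AddFamilyPrecheckSketch {
  // Fail fast before any table state is modified.
  static void checkFamilyCanBeAdded(HTableDescriptor current, HColumnDescriptor toAdd)
      throws IOException {
    if (current == null) {
      throw new IOException("HTableDescriptor missing for table");
    }
    if (current.hasFamily(toAdd.getName())) {
      throw new InvalidFamilyOperationException("Column family '"
          + toAdd.getNameAsString() + "' already exists so cannot be added");
    }
  }
}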
+ unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); + if (unmodifiedHTableDescriptor == null) { + throw new IOException("HTableDescriptor missing for " + tableName); + } + if (unmodifiedHTableDescriptor.hasFamily(cfDescriptor.getName())) { + throw new InvalidFamilyOperationException("Column family '" + getColumnFamilyName() + + "' in table '" + tableName + "' already exists so cannot be added"); + } + } + + /** + * Action before adding column family. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void preAdd(final MasterProcedureEnv env, final AddColumnFamilyState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * Add the column family to the file system + */ + private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException { + // Update table descriptor + LOG.info("AddColumn. Table = " + tableName + " HCD = " + cfDescriptor.toString()); + + HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); + + if (htd.hasFamily(cfDescriptor.getName())) { + // It is possible to reach this situation, as we could already add the column family + // to table descriptor, but the master failover happens before we complete this state. + // We should be able to handle running this function multiple times without causing problem. + return; + } + + htd.addFamily(cfDescriptor); + env.getMasterServices().getTableDescriptors().add(htd); + } + + /** + * Restore the table descriptor back to pre-add + * @param env MasterProcedureEnv + * @throws IOException + **/ + private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException { + HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); + if (htd.hasFamily(cfDescriptor.getName())) { + // Remove the column family from file system and update the table descriptor to + // the before-add-column-family-state + MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(env, tableName, + getRegionInfoList(env), cfDescriptor.getName()); + + env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor); + + // Make sure regions are opened after table descriptor is updated. + reOpenAllRegionsIfTableIsOnline(env); + } + } + + /** + * Action after adding column family. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void postAdd(final MasterProcedureEnv env, final AddColumnFamilyState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * Last action from the procedure - executed when online schema change is supported. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException { + // This operation only run when the table is enabled. + if (!env.getMasterServices().getAssignmentManager().getTableStateManager() + .isTableState(getTableName(), TableState.State.ENABLED)) { + return; + } + + if (MasterDDLOperationHelper.reOpenAllRegions(env, getTableName(), getRegionInfoList(env))) { + LOG.info("Completed add column family operation on table " + getTableName()); + } else { + LOG.warn("Error on reopening the regions on table " + getTableName()); + } + } + + /** + * The procedure could be restarted from a different machine. 
If the variable is null, we need to + * retrieve it. + * @return traceEnabled + */ + private Boolean isTraceEnabled() { + if (traceEnabled == null) { + traceEnabled = LOG.isTraceEnabled(); + } + return traceEnabled; + } + + private String getColumnFamilyName() { + return cfDescriptor.getNameAsString(); + } + + /** + * Coprocessor Action. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void runCoprocessorAction(final MasterProcedureEnv env, final AddColumnFamilyState state) + throws IOException, InterruptedException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + switch (state) { + case ADD_COLUMN_FAMILY_PRE_OPERATION: + cpHost.preAddColumnHandler(tableName, cfDescriptor); + break; + case ADD_COLUMN_FAMILY_POST_OPERATION: + cpHost.postAddColumnHandler(tableName, cfDescriptor); + break; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + return null; + } + }); + } + } + + private List getRegionInfoList(final MasterProcedureEnv env) throws IOException { + if (regionInfoList == null) { + regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, getTableName()); + } + return regionInfoList; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java new file mode 100644 index 00000000000..dd6d38775d3 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java @@ -0,0 +1,442 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.InputStream; +import java.io.OutputStream; +import java.io.IOException; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.TableDescriptor; +import org.apache.hadoop.hbase.TableExistsException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.master.MasterFileSystem; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState; +import org.apache.hadoop.hbase.util.FSTableDescriptors; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.ModifyRegionUtils; +import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; +import org.apache.hadoop.security.UserGroupInformation; + +import com.google.common.collect.Lists; + +@InterfaceAudience.Private +public class CreateTableProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(CreateTableProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + + // used for compatibility with old clients + private final ProcedurePrepareLatch syncLatch; + + private HTableDescriptor hTableDescriptor; + private List newRegions; + private UserGroupInformation user; + + public CreateTableProcedure() { + // Required by the Procedure framework to create the procedure on replay + syncLatch = null; + } + + public CreateTableProcedure(final MasterProcedureEnv env, + final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions) + throws IOException { + this(env, hTableDescriptor, newRegions, null); + } + + public CreateTableProcedure(final MasterProcedureEnv env, + final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions, + final ProcedurePrepareLatch syncLatch) + throws IOException { + this.hTableDescriptor = hTableDescriptor; + this.newRegions = newRegions != null ? 
Lists.newArrayList(newRegions) : null; + this.user = env.getRequestUser().getUGI(); + + // used for compatibility with clients without procedures + // they need a sync TableExistsException + this.syncLatch = syncLatch; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableState state) { + if (LOG.isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + try { + switch (state) { + case CREATE_TABLE_PRE_OPERATION: + // Verify if we can create the table + boolean exists = !prepareCreate(env); + ProcedurePrepareLatch.releaseLatch(syncLatch, this); + + if (exists) { + assert isFailed() : "the delete should have an exception here"; + return Flow.NO_MORE_STATE; + } + + preCreate(env); + setNextState(CreateTableState.CREATE_TABLE_WRITE_FS_LAYOUT); + break; + case CREATE_TABLE_WRITE_FS_LAYOUT: + newRegions = createFsLayout(env, hTableDescriptor, newRegions); + setNextState(CreateTableState.CREATE_TABLE_ADD_TO_META); + break; + case CREATE_TABLE_ADD_TO_META: + newRegions = addTableToMeta(env, hTableDescriptor, newRegions); + setNextState(CreateTableState.CREATE_TABLE_ASSIGN_REGIONS); + break; + case CREATE_TABLE_ASSIGN_REGIONS: + assignRegions(env, getTableName(), newRegions); + setNextState(CreateTableState.CREATE_TABLE_UPDATE_DESC_CACHE); + break; + case CREATE_TABLE_UPDATE_DESC_CACHE: + updateTableDescCache(env, getTableName()); + setNextState(CreateTableState.CREATE_TABLE_POST_OPERATION); + break; + case CREATE_TABLE_POST_OPERATION: + postCreate(env); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (InterruptedException|IOException e) { + LOG.error("Error trying to create table=" + getTableName() + " state=" + state, e); + setFailure("master-create-table", e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final CreateTableState state) + throws IOException { + if (LOG.isTraceEnabled()) { + LOG.trace(this + " rollback state=" + state); + } + try { + switch (state) { + case CREATE_TABLE_POST_OPERATION: + break; + case CREATE_TABLE_UPDATE_DESC_CACHE: + DeleteTableProcedure.deleteTableDescriptorCache(env, getTableName()); + break; + case CREATE_TABLE_ASSIGN_REGIONS: + DeleteTableProcedure.deleteAssignmentState(env, getTableName()); + break; + case CREATE_TABLE_ADD_TO_META: + DeleteTableProcedure.deleteFromMeta(env, getTableName(), newRegions); + break; + case CREATE_TABLE_WRITE_FS_LAYOUT: + DeleteTableProcedure.deleteFromFs(env, getTableName(), newRegions, false); + break; + case CREATE_TABLE_PRE_OPERATION: + DeleteTableProcedure.deleteTableStates(env, getTableName()); + // TODO-MAYBE: call the deleteTable coprocessor event? + ProcedurePrepareLatch.releaseLatch(syncLatch, this); + break; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException e) { + // This will be retried. Unless there is a bug in the code, + // this should be just a "temporary error" (e.g. 
network down) + LOG.warn("Failed rollback attempt step=" + state + " table=" + getTableName(), e); + throw e; + } + } + + @Override + protected CreateTableState getState(final int stateId) { + return CreateTableState.valueOf(stateId); + } + + @Override + protected int getStateId(final CreateTableState state) { + return state.getNumber(); + } + + @Override + protected CreateTableState getInitialState() { + return CreateTableState.CREATE_TABLE_PRE_OPERATION; + } + + @Override + protected void setNextState(final CreateTableState state) { + if (aborted.get()) { + setAbortFailure("create-table", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public TableName getTableName() { + return hTableDescriptor.getTableName(); + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.CREATE; + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (table="); + sb.append(getTableName()); + sb.append(") user="); + sb.append(user); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + MasterProcedureProtos.CreateTableStateData.Builder state = + MasterProcedureProtos.CreateTableStateData.newBuilder() + .setUserInfo(MasterProcedureUtil.toProtoUserInfo(this.user)) + .setTableSchema(hTableDescriptor.convert()); + if (newRegions != null) { + for (HRegionInfo hri: newRegions) { + state.addRegionInfo(HRegionInfo.convert(hri)); + } + } + state.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + MasterProcedureProtos.CreateTableStateData state = + MasterProcedureProtos.CreateTableStateData.parseDelimitedFrom(stream); + user = MasterProcedureUtil.toUserInfo(state.getUserInfo()); + hTableDescriptor = HTableDescriptor.convert(state.getTableSchema()); + if (state.getRegionInfoCount() == 0) { + newRegions = null; + } else { + newRegions = new ArrayList(state.getRegionInfoCount()); + for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) { + newRegions.add(HRegionInfo.convert(hri)); + } + } + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + return env.getProcedureQueue().tryAcquireTableWrite(getTableName(), "create table"); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableWrite(getTableName()); + } + + private boolean prepareCreate(final MasterProcedureEnv env) throws IOException { + final TableName tableName = getTableName(); + if (MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) { + setFailure("master-create-table", new TableExistsException(getTableName())); + return false; + } + return true; + } + + private void preCreate(final MasterProcedureEnv env) + throws IOException, InterruptedException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + final HRegionInfo[] regions = newRegions == null ? 
null : + newRegions.toArray(new HRegionInfo[newRegions.size()]); + user.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + cpHost.preCreateTableHandler(hTableDescriptor, regions); + return null; + } + }); + } + } + + private void postCreate(final MasterProcedureEnv env) + throws IOException, InterruptedException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + final HRegionInfo[] regions = (newRegions == null) ? null : + newRegions.toArray(new HRegionInfo[newRegions.size()]); + user.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + cpHost.postCreateTableHandler(hTableDescriptor, regions); + return null; + } + }); + } + } + + protected interface CreateHdfsRegions { + List createHdfsRegions(final MasterProcedureEnv env, + final Path tableRootDir, final TableName tableName, + final List newRegions) throws IOException; + } + + protected static List createFsLayout(final MasterProcedureEnv env, + final HTableDescriptor hTableDescriptor, final List newRegions) + throws IOException { + return createFsLayout(env, hTableDescriptor, newRegions, new CreateHdfsRegions() { + @Override + public List createHdfsRegions(final MasterProcedureEnv env, + final Path tableRootDir, final TableName tableName, + final List newRegions) throws IOException { + HRegionInfo[] regions = newRegions != null ? + newRegions.toArray(new HRegionInfo[newRegions.size()]) : null; + return ModifyRegionUtils.createRegions(env.getMasterConfiguration(), + tableRootDir, hTableDescriptor, regions, null); + } + }); + } + + protected static List createFsLayout(final MasterProcedureEnv env, + final HTableDescriptor hTableDescriptor, List newRegions, + final CreateHdfsRegions hdfsRegionHandler) throws IOException { + final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final Path tempdir = mfs.getTempDir(); + + // 1. Create Table Descriptor + // using a copy of descriptor, table will be created enabling first + TableDescriptor underConstruction = new TableDescriptor(hTableDescriptor); + final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName()); + ((FSTableDescriptors)(env.getMasterServices().getTableDescriptors())) + .createTableDescriptorForTableDirectory( + tempTableDir, underConstruction, false); + + // 2. Create Regions + newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir, + hTableDescriptor.getTableName(), newRegions); + + // 3. 
Move Table temp directory to the hbase root location + final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), hTableDescriptor.getTableName()); + FileSystem fs = mfs.getFileSystem(); + if (!fs.delete(tableDir, true) && fs.exists(tableDir)) { + throw new IOException("Couldn't delete " + tableDir); + } + if (!fs.rename(tempTableDir, tableDir)) { + throw new IOException("Unable to move table from temp=" + tempTableDir + + " to hbase root=" + tableDir); + } + return newRegions; + } + + protected static List addTableToMeta(final MasterProcedureEnv env, + final HTableDescriptor hTableDescriptor, + final List regions) throws IOException { + if (regions != null && regions.size() > 0) { + ProcedureSyncWait.waitMetaRegions(env); + + // Add regions to META + addRegionsToMeta(env, hTableDescriptor, regions); + // Add replicas if needed + List newRegions = addReplicas(env, hTableDescriptor, regions); + + // Setup replication for region replicas if needed + if (hTableDescriptor.getRegionReplication() > 1) { + ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration()); + } + return newRegions; + } + return regions; + } + + /** + * Create any replicas for the regions (the default replicas that was + * already created is passed to the method) + * @param hTableDescriptor descriptor to use + * @param regions default replicas + * @return the combined list of default and non-default replicas + */ + private static List addReplicas(final MasterProcedureEnv env, + final HTableDescriptor hTableDescriptor, + final List regions) { + int numRegionReplicas = hTableDescriptor.getRegionReplication() - 1; + if (numRegionReplicas <= 0) { + return regions; + } + List hRegionInfos = + new ArrayList((numRegionReplicas+1)*regions.size()); + for (int i = 0; i < regions.size(); i++) { + for (int j = 1; j <= numRegionReplicas; j++) { + hRegionInfos.add(RegionReplicaUtil.getRegionInfoForReplica(regions.get(i), j)); + } + } + hRegionInfos.addAll(regions); + return hRegionInfos; + } + + protected static void assignRegions(final MasterProcedureEnv env, + final TableName tableName, final List regions) throws IOException { + ProcedureSyncWait.waitRegionServers(env); + + // Trigger immediate assignment of the regions in round-robin fashion + final AssignmentManager assignmentManager = env.getMasterServices().getAssignmentManager(); + ModifyRegionUtils.assignRegions(assignmentManager, regions); + + // Enable table + assignmentManager.getTableStateManager() + .setTableState(tableName, TableState.State.ENABLED); + } + + /** + * Add the specified set of regions to the hbase:meta table. 
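To make the replica arithmetic above concrete: addReplicas() derives regionReplication - 1 extra HRegionInfo entries per default region, so a table created with four regions and region replication 3 hands twelve region infos to assignRegions(). Below is a minimal illustrative sketch of the client-visible knob and of RegionReplicaUtil doing that derivation; the table and family names are assumptions, not taken from the patch.

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionReplicaUtil;

    public final class RegionReplicaSketch {
      public static void main(String[] args) {
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
        htd.addFamily(new HColumnDescriptor("cf"));
        htd.setRegionReplication(3); // one default region plus two read replicas each

        // The procedure derives the non-default replicas roughly like this:
        HRegionInfo primary = new HRegionInfo(htd.getTableName());
        HRegionInfo replica1 = RegionReplicaUtil.getRegionInfoForReplica(primary, 1);
        HRegionInfo replica2 = RegionReplicaUtil.getRegionInfoForReplica(primary, 2);
        System.out.println(replica1.getReplicaId() + " and " + replica2.getReplicaId());
      }
    }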
+ */ + protected static void addRegionsToMeta(final MasterProcedureEnv env, + final HTableDescriptor hTableDescriptor, + final List regionInfos) throws IOException { + MetaTableAccessor.addRegionsToMeta(env.getMasterServices().getConnection(), + regionInfos, hTableDescriptor.getRegionReplication()); + } + + protected static void updateTableDescCache(final MasterProcedureEnv env, + final TableName tableName) throws IOException { + env.getMasterServices().getTableDescriptors().get(tableName); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java new file mode 100644 index 00000000000..6e969103f65 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteColumnFamilyProcedure.java @@ -0,0 +1,439 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.security.PrivilegedExceptionAction; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.executor.EventType; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyState; +import org.apache.hadoop.hbase.util.ByteStringer; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.security.UserGroupInformation; + +/** + * The procedure to delete a column family from an existing table. 
+ */ +@InterfaceAudience.Private +public class DeleteColumnFamilyProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(DeleteColumnFamilyProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + + private HTableDescriptor unmodifiedHTableDescriptor; + private TableName tableName; + private byte [] familyName; + private UserGroupInformation user; + + private List regionInfoList; + private Boolean traceEnabled; + + public DeleteColumnFamilyProcedure() { + this.unmodifiedHTableDescriptor = null; + this.regionInfoList = null; + this.traceEnabled = null; + } + + public DeleteColumnFamilyProcedure( + final MasterProcedureEnv env, + final TableName tableName, + final byte[] familyName) throws IOException { + this.tableName = tableName; + this.familyName = familyName; + this.user = env.getRequestUser().getUGI(); + this.unmodifiedHTableDescriptor = null; + this.regionInfoList = null; + this.traceEnabled = null; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, DeleteColumnFamilyState state) { + if (isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + + try { + switch (state) { + case DELETE_COLUMN_FAMILY_PREPARE: + prepareDelete(env); + setNextState(DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_PRE_OPERATION); + break; + case DELETE_COLUMN_FAMILY_PRE_OPERATION: + preDelete(env, state); + setNextState(DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR); + break; + case DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR: + updateTableDescriptor(env); + setNextState(DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT); + break; + case DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT: + deleteFromFs(env); + setNextState(DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_POST_OPERATION); + break; + case DELETE_COLUMN_FAMILY_POST_OPERATION: + postDelete(env, state); + setNextState(DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS); + break; + case DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS: + reOpenAllRegionsIfTableIsOnline(env); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + } catch (InterruptedException|IOException e) { + if (!isRollbackSupported(state)) { + // We reach a state that cannot be rolled back. We just need to keep retry. + LOG.warn("Error trying to delete the column family " + getColumnFamilyName() + + " from table " + tableName + "(in state=" + state + ")", e); + } else { + LOG.error("Error trying to delete the column family " + getColumnFamilyName() + + " from table " + tableName + "(in state=" + state + ")", e); + setFailure("master-delete-column-family", e); + } + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final DeleteColumnFamilyState state) + throws IOException { + if (isTraceEnabled()) { + LOG.trace(this + " rollback state=" + state); + } + try { + switch (state) { + case DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS: + break; // Nothing to undo. + case DELETE_COLUMN_FAMILY_POST_OPERATION: + // TODO-MAYBE: call the coprocessor event to undo? + break; + case DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT: + // Once we reach to this state - we could NOT rollback - as it is tricky to undelete + // the deleted files. We are not suppose to reach here, throw exception so that we know + // there is a code bug to investigate. 
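For orientation, this procedure is the master-side half of an ordinary column-family delete. A minimal client sketch, assuming the 1.x Admin API (where the call is still named deleteColumn) and an illustrative table and family name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class DeleteColumnFamilySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // On the master this request is executed as a DeleteColumnFamilyProcedure.
          admin.deleteColumn(TableName.valueOf("mytable"), Bytes.toBytes("cf"));
        }
      }
    }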
+ throw new UnsupportedOperationException(this + " rollback of state=" + state + + " is unsupported."); + case DELETE_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR: + restoreTableDescriptor(env); + break; + case DELETE_COLUMN_FAMILY_PRE_OPERATION: + // TODO-MAYBE: call the coprocessor event to undo? + break; + case DELETE_COLUMN_FAMILY_PREPARE: + break; // nothing to do + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + } catch (IOException e) { + // This will be retried. Unless there is a bug in the code, + // this should be just a "temporary error" (e.g. network down) + LOG.warn("Failed rollback attempt step " + state + " for deleting the column family" + + getColumnFamilyName() + " to the table " + tableName, e); + throw e; + } + } + + @Override + protected DeleteColumnFamilyState getState(final int stateId) { + return DeleteColumnFamilyState.valueOf(stateId); + } + + @Override + protected int getStateId(final DeleteColumnFamilyState state) { + return state.getNumber(); + } + + @Override + protected DeleteColumnFamilyState getInitialState() { + return DeleteColumnFamilyState.DELETE_COLUMN_FAMILY_PREPARE; + } + + @Override + protected void setNextState(DeleteColumnFamilyState state) { + if (aborted.get() && isRollbackSupported(state)) { + setAbortFailure("delete-columnfamily", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (!env.isInitialized()) return false; + return env.getProcedureQueue().tryAcquireTableWrite( + tableName, + EventType.C_M_DELETE_FAMILY.toString()); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableWrite(tableName); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + MasterProcedureProtos.DeleteColumnFamilyStateData.Builder deleteCFMsg = + MasterProcedureProtos.DeleteColumnFamilyStateData.newBuilder() + .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user)) + .setTableName(ProtobufUtil.toProtoTableName(tableName)) + .setColumnfamilyName(ByteStringer.wrap(familyName)); + if (unmodifiedHTableDescriptor != null) { + deleteCFMsg.setUnmodifiedTableSchema(unmodifiedHTableDescriptor.convert()); + } + + deleteCFMsg.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + MasterProcedureProtos.DeleteColumnFamilyStateData deleteCFMsg = + MasterProcedureProtos.DeleteColumnFamilyStateData.parseDelimitedFrom(stream); + user = MasterProcedureUtil.toUserInfo(deleteCFMsg.getUserInfo()); + tableName = ProtobufUtil.toTableName(deleteCFMsg.getTableName()); + familyName = deleteCFMsg.getColumnfamilyName().toByteArray(); + + if (deleteCFMsg.hasUnmodifiedTableSchema()) { + unmodifiedHTableDescriptor = HTableDescriptor.convert(deleteCFMsg.getUnmodifiedTableSchema()); + } + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (table="); + sb.append(tableName); + sb.append(", columnfamily="); + if (familyName != null) { + sb.append(getColumnFamilyName()); + } else { + sb.append("Unknown"); + } + sb.append(") user="); + sb.append(user); + } + + @Override + public TableName getTableName() { + return tableName; + 
} + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.EDIT; + } + + /** + * Action before any real action of deleting column family. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void prepareDelete(final MasterProcedureEnv env) throws IOException { + // Checks whether the table is allowed to be modified. + MasterDDLOperationHelper.checkTableModifiable(env, tableName); + + // In order to update the descriptor, we need to retrieve the old descriptor for comparison. + unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); + if (unmodifiedHTableDescriptor == null) { + throw new IOException("HTableDescriptor missing for " + tableName); + } + if (!unmodifiedHTableDescriptor.hasFamily(familyName)) { + throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName() + + "' does not exist, so it cannot be deleted"); + } + } + + /** + * Action before deleting column family. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void preDelete(final MasterProcedureEnv env, final DeleteColumnFamilyState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * Remove the column family from the file system and update the table descriptor + */ + private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException { + // Update table descriptor + LOG.info("DeleteColumn. Table = " + tableName + " family = " + getColumnFamilyName()); + + HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); + + if (!htd.hasFamily(familyName)) { + // It is possible to reach this situation, as we could already delete the column family + // from table descriptor, but the master failover happens before we complete this state. + // We should be able to handle running this function multiple times without causing problem. + return; + } + + htd.removeFamily(familyName); + env.getMasterServices().getTableDescriptors().add(htd); + } + + /** + * Restore back to the old descriptor + * @param env MasterProcedureEnv + * @throws IOException + **/ + private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException { + env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor); + + // Make sure regions are opened after table descriptor is updated. + reOpenAllRegionsIfTableIsOnline(env); + } + + /** + * Remove the column family from the file system + **/ + private void deleteFromFs(final MasterProcedureEnv env) throws IOException { + MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(env, tableName, + getRegionInfoList(env), familyName); + } + + /** + * Action after deleting column family. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void postDelete(final MasterProcedureEnv env, final DeleteColumnFamilyState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * Last action from the procedure - executed when online schema change is supported. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException { + // This operation only run when the table is enabled. 
+ if (!env.getMasterServices().getAssignmentManager().getTableStateManager() + .isTableState(getTableName(), TableState.State.ENABLED)) { + return; + } + + if (MasterDDLOperationHelper.reOpenAllRegions(env, getTableName(), getRegionInfoList(env))) { + LOG.info("Completed delete column family operation on table " + getTableName()); + } else { + LOG.warn("Error on reopening the regions on table " + getTableName()); + } + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @return traceEnabled + */ + private Boolean isTraceEnabled() { + if (traceEnabled == null) { + traceEnabled = LOG.isTraceEnabled(); + } + return traceEnabled; + } + + private String getColumnFamilyName() { + return Bytes.toString(familyName); + } + + /** + * Coprocessor Action. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void runCoprocessorAction(final MasterProcedureEnv env, + final DeleteColumnFamilyState state) throws IOException, InterruptedException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + switch (state) { + case DELETE_COLUMN_FAMILY_PRE_OPERATION: + cpHost.preDeleteColumnHandler(tableName, familyName); + break; + case DELETE_COLUMN_FAMILY_POST_OPERATION: + cpHost.postDeleteColumnHandler(tableName, familyName); + break; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + return null; + } + }); + } + } + + /* + * Check whether we are in the state that can be rollback + */ + private boolean isRollbackSupported(final DeleteColumnFamilyState state) { + switch (state) { + case DELETE_COLUMN_FAMILY_REOPEN_ALL_REGIONS: + case DELETE_COLUMN_FAMILY_POST_OPERATION: + case DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT: + // It is not safe to rollback if we reach to these states. + return false; + default: + break; + } + return true; + } + + private List getRegionInfoList(final MasterProcedureEnv env) throws IOException { + if (regionInfoList == null) { + regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, getTableName()); + } + return regionInfoList; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java new file mode 100644 index 00000000000..7809e55cce7 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java @@ -0,0 +1,450 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.InputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.backup.HFileArchiver; +import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.exceptions.HBaseException; +import org.apache.hadoop.hbase.mob.MobConstants; +import org.apache.hadoop.hbase.mob.MobUtils; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.master.MasterFileSystem; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.quotas.MasterQuotaManager; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.security.UserGroupInformation; + +@InterfaceAudience.Private +public class DeleteTableProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(DeleteTableProcedure.class); + + private List regions; + private UserGroupInformation user; + private TableName tableName; + + // used for compatibility with old clients + private final ProcedurePrepareLatch syncLatch; + + public DeleteTableProcedure() { + // Required by the Procedure framework to create the procedure on replay + syncLatch = null; + } + + public DeleteTableProcedure(final MasterProcedureEnv env, final TableName tableName) + throws IOException { + this(env, tableName, null); + } + + public DeleteTableProcedure(final MasterProcedureEnv env, final TableName tableName, + final ProcedurePrepareLatch syncLatch) throws IOException { + this.tableName = tableName; + this.user = env.getRequestUser().getUGI(); + + // used for compatibility with clients without procedures + // they need a sync TableNotFoundException, TableNotDisabledException, ... + this.syncLatch = syncLatch; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, DeleteTableState state) { + if (LOG.isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + try { + switch (state) { + case DELETE_TABLE_PRE_OPERATION: + // Verify if we can delete the table + boolean deletable = prepareDelete(env); + ProcedurePrepareLatch.releaseLatch(syncLatch, this); + if (!deletable) { + assert isFailed() : "the delete should have an exception here"; + return Flow.NO_MORE_STATE; + } + + // TODO: Move out... 
in the acquireLock() + LOG.debug("waiting for '" + getTableName() + "' regions in transition"); + regions = ProcedureSyncWait.getRegionsFromMeta(env, getTableName()); + assert regions != null && !regions.isEmpty() : "unexpected 0 regions"; + ProcedureSyncWait.waitRegionInTransition(env, regions); + + // Call coprocessors + preDelete(env); + + setNextState(DeleteTableState.DELETE_TABLE_REMOVE_FROM_META); + break; + case DELETE_TABLE_REMOVE_FROM_META: + LOG.debug("delete '" + getTableName() + "' regions from META"); + DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions); + setNextState(DeleteTableState.DELETE_TABLE_CLEAR_FS_LAYOUT); + break; + case DELETE_TABLE_CLEAR_FS_LAYOUT: + LOG.debug("delete '" + getTableName() + "' from filesystem"); + DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true); + setNextState(DeleteTableState.DELETE_TABLE_UPDATE_DESC_CACHE); + regions = null; + break; + case DELETE_TABLE_UPDATE_DESC_CACHE: + LOG.debug("delete '" + getTableName() + "' descriptor"); + DeleteTableProcedure.deleteTableDescriptorCache(env, getTableName()); + setNextState(DeleteTableState.DELETE_TABLE_UNASSIGN_REGIONS); + break; + case DELETE_TABLE_UNASSIGN_REGIONS: + LOG.debug("delete '" + getTableName() + "' assignment state"); + DeleteTableProcedure.deleteAssignmentState(env, getTableName()); + setNextState(DeleteTableState.DELETE_TABLE_POST_OPERATION); + break; + case DELETE_TABLE_POST_OPERATION: + postDelete(env); + LOG.debug("delete '" + getTableName() + "' completed"); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (HBaseException|IOException e) { + LOG.warn("Retriable error trying to delete table=" + getTableName() + " state=" + state, e); + } catch (InterruptedException e) { + // if the interrupt is real, the executor will be stopped. + LOG.warn("Interrupted trying to delete table=" + getTableName() + " state=" + state, e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final DeleteTableState state) { + if (state == DeleteTableState.DELETE_TABLE_PRE_OPERATION) { + // nothing to rollback, pre-delete is just table-state checks. + // We can fail if the table does not exist or is not disabled. + ProcedurePrepareLatch.releaseLatch(syncLatch, this); + return; + } + + // The delete doesn't have a rollback. The execution will succeed, at some point. + throw new UnsupportedOperationException("unhandled state=" + state); + } + + @Override + protected DeleteTableState getState(final int stateId) { + return DeleteTableState.valueOf(stateId); + } + + @Override + protected int getStateId(final DeleteTableState state) { + return state.getNumber(); + } + + @Override + protected DeleteTableState getInitialState() { + return DeleteTableState.DELETE_TABLE_PRE_OPERATION; + } + + @Override + public TableName getTableName() { + return tableName; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.DELETE; + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + // TODO: We may be able to abort if the procedure is not started yet. 
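As the state machine above suggests, a table delete has no rollback once it passes the prepare check, which is why clients are expected to disable the table first. A minimal sketch against the public Admin API, with an assumed table name:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class DropTableSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("mytable");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (admin.isTableEnabled(tn)) {
            admin.disableTable(tn);   // runs DisableTableProcedure on the master
          }
          admin.deleteTable(tn);      // runs DeleteTableProcedure; fails if not disabled
        }
      }
    }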
+ return false; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (!env.isInitialized()) return false; + return env.getProcedureQueue().tryAcquireTableWrite(getTableName(), "delete table"); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableWrite(getTableName()); + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (table="); + sb.append(getTableName()); + sb.append(") user="); + sb.append(user); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + MasterProcedureProtos.DeleteTableStateData.Builder state = + MasterProcedureProtos.DeleteTableStateData.newBuilder() + .setUserInfo(MasterProcedureUtil.toProtoUserInfo(this.user)) + .setTableName(ProtobufUtil.toProtoTableName(tableName)); + if (regions != null) { + for (HRegionInfo hri: regions) { + state.addRegionInfo(HRegionInfo.convert(hri)); + } + } + state.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + MasterProcedureProtos.DeleteTableStateData state = + MasterProcedureProtos.DeleteTableStateData.parseDelimitedFrom(stream); + user = MasterProcedureUtil.toUserInfo(state.getUserInfo()); + tableName = ProtobufUtil.toTableName(state.getTableName()); + if (state.getRegionInfoCount() == 0) { + regions = null; + } else { + regions = new ArrayList(state.getRegionInfoCount()); + for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) { + regions.add(HRegionInfo.convert(hri)); + } + } + } + + private boolean prepareDelete(final MasterProcedureEnv env) throws IOException { + try { + env.getMasterServices().checkTableModifiable(tableName); + } catch (TableNotFoundException|TableNotDisabledException e) { + setFailure("master-delete-table", e); + return false; + } + return true; + } + + private boolean preDelete(final MasterProcedureEnv env) + throws IOException, InterruptedException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + final TableName tableName = this.tableName; + user.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + cpHost.preDeleteTableHandler(tableName); + return null; + } + }); + } + return true; + } + + private void postDelete(final MasterProcedureEnv env) + throws IOException, InterruptedException { + deleteTableStates(env, tableName); + + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + final TableName tableName = this.tableName; + user.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + cpHost.postDeleteTableHandler(tableName); + return null; + } + }); + } + } + + protected static void deleteFromFs(final MasterProcedureEnv env, + final TableName tableName, final List regions, + final boolean archive) throws IOException { + final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + final FileSystem fs = mfs.getFileSystem(); + final Path tempdir = mfs.getTempDir(); + + final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName); + final Path tempTableDir = FSUtils.getTableDir(tempdir, tableName); + + if (fs.exists(tableDir)) { + // Ensure temp exists + if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) { + throw new IOException("HBase temp 
directory '" + tempdir + "' creation failure."); + } + + // Ensure parent exists + if (!fs.exists(tempTableDir.getParent()) && !fs.mkdirs(tempTableDir.getParent())) { + throw new IOException("HBase temp directory '" + tempdir + "' creation failure."); + } + + // Move the table in /hbase/.tmp + if (!fs.rename(tableDir, tempTableDir)) { + if (fs.exists(tempTableDir)) { + // TODO + // what's in this dir? something old? probably something manual from the user... + // let's get rid of this stuff... + FileStatus[] files = fs.listStatus(tempdir); + if (files != null && files.length > 0) { + for (int i = 0; i < files.length; ++i) { + if (!files[i].isDir()) continue; + HFileArchiver.archiveRegion(fs, mfs.getRootDir(), tempTableDir, files[i].getPath()); + } + } + fs.delete(tempdir, true); + } + throw new IOException("Unable to move '" + tableDir + "' to temp '" + tempTableDir + "'"); + } + } + + // Archive regions from FS (temp directory) + if (archive) { + for (HRegionInfo hri : regions) { + LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS"); + HFileArchiver.archiveRegion(fs, mfs.getRootDir(), + tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName())); + } + LOG.debug("Table '" + tableName + "' archived!"); + } + + // Archive the mob data if there is a mob-enabled column + HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); + HColumnDescriptor[] hcds = htd.getColumnFamilies(); + boolean hasMob = false; + for (HColumnDescriptor hcd : hcds) { + if (hcd.isMobEnabled()) { + hasMob = true; + break; + } + } + Path mobTableDir = null; + if (hasMob) { + // Archive mob data + mobTableDir = FSUtils.getTableDir(new Path(mfs.getRootDir(), MobConstants.MOB_DIR_NAME), + tableName); + Path regionDir = + new Path(mobTableDir, MobUtils.getMobRegionInfo(tableName).getEncodedName()); + if (fs.exists(regionDir)) { + HFileArchiver.archiveRegion(fs, mfs.getRootDir(), mobTableDir, regionDir); + } + } + + + // Delete table directory from FS (temp directory) + if (!fs.delete(tempTableDir, true) && fs.exists(tempTableDir)) { + throw new IOException("Couldn't delete " + tempTableDir); + } + + // Delete the table directory where the mob files are saved + if (hasMob && mobTableDir != null && fs.exists(mobTableDir)) { + if (!fs.delete(mobTableDir, true)) { + LOG.error("Couldn't delete " + mobTableDir); + } + } + } + + /** + * There may be items for this table still up in hbase:meta in the case where the + * info:regioninfo column was empty because of some write error. Remove ALL rows from hbase:meta + * that have to do with this table. See HBASE-12980. 
+ * @throws IOException + */ + private static void cleanAnyRemainingRows(final MasterProcedureEnv env, + final TableName tableName) throws IOException { + ClusterConnection connection = env.getMasterServices().getConnection(); + Scan tableScan = MetaTableAccessor.getScanForTableName(connection, tableName); + try (Table metaTable = + connection.getTable(TableName.META_TABLE_NAME)) { + List deletes = new ArrayList(); + try (ResultScanner resScanner = metaTable.getScanner(tableScan)) { + for (Result result : resScanner) { + deletes.add(new Delete(result.getRow())); + } + } + if (!deletes.isEmpty()) { + LOG.warn("Deleting some vestigal " + deletes.size() + " rows of " + tableName + + " from " + TableName.META_TABLE_NAME); + metaTable.delete(deletes); + } + } + } + + protected static void deleteFromMeta(final MasterProcedureEnv env, + final TableName tableName, List regions) throws IOException { + MetaTableAccessor.deleteRegions(env.getMasterServices().getConnection(), regions); + + // Clean any remaining rows for this table. + cleanAnyRemainingRows(env, tableName); + } + + protected static void deleteAssignmentState(final MasterProcedureEnv env, + final TableName tableName) throws IOException { + AssignmentManager am = env.getMasterServices().getAssignmentManager(); + + // Clean up regions of the table in RegionStates. + LOG.debug("Removing '" + tableName + "' from region states."); + am.getRegionStates().tableDeleted(tableName); + + // If entry for this table states, remove it. + LOG.debug("Marking '" + tableName + "' as deleted."); + am.getTableStateManager().setDeletedTable(tableName); + } + + protected static void deleteTableDescriptorCache(final MasterProcedureEnv env, + final TableName tableName) throws IOException { + LOG.debug("Removing '" + tableName + "' descriptor."); + env.getMasterServices().getTableDescriptors().remove(tableName); + } + + protected static void deleteTableStates(final MasterProcedureEnv env, final TableName tableName) + throws IOException { + getMasterQuotaManager(env).removeTableFromNamespaceQuota(tableName); + } + + private static MasterQuotaManager getMasterQuotaManager(final MasterProcedureEnv env) + throws IOException { + return ProcedureSyncWait.waitFor(env, "quota manager to be available", + new ProcedureSyncWait.Predicate() { + @Override + public MasterQuotaManager evaluate() throws IOException { + return env.getMasterServices().getMasterQuotaManager(); + } + }); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java new file mode 100644 index 00000000000..2507cecd66a --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java @@ -0,0 +1,540 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.security.PrivilegedExceptionAction; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotEnabledException; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.executor.EventType; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.BulkAssigner; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.RegionStates; +import org.apache.hadoop.hbase.master.TableStateManager; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.htrace.Trace; + +@InterfaceAudience.Private +public class DisableTableProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(DisableTableProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + + // This is for back compatible with 1.0 asynchronized operations. 
+ private final ProcedurePrepareLatch syncLatch; + + private TableName tableName; + private boolean skipTableStateCheck; + private UserGroupInformation user; + + private Boolean traceEnabled = null; + + enum MarkRegionOfflineOpResult { + MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL, + BULK_ASSIGN_REGIONS_FAILED, + MARK_ALL_REGIONS_OFFLINE_INTERRUPTED, + } + + public DisableTableProcedure() { + syncLatch = null; + } + + /** + * Constructor + * @param env MasterProcedureEnv + * @param tableName the table to operate on + * @param skipTableStateCheck whether to check table state + * @throws IOException + */ + public DisableTableProcedure( + final MasterProcedureEnv env, + final TableName tableName, + final boolean skipTableStateCheck) throws IOException { + this(env, tableName, skipTableStateCheck, null); + } + + /** + * Constructor + * @param env MasterProcedureEnv + * @param tableName the table to operate on + * @param skipTableStateCheck whether to check table state + * @throws IOException + */ + public DisableTableProcedure( + final MasterProcedureEnv env, + final TableName tableName, + final boolean skipTableStateCheck, + final ProcedurePrepareLatch syncLatch) throws IOException { + this.tableName = tableName; + this.skipTableStateCheck = skipTableStateCheck; + this.user = env.getRequestUser().getUGI(); + + // Compatible with 1.0: We use latch to make sure that this procedure implementation is + // compatible with 1.0 asynchronized operations. We need to lock the table and check + // whether the Disable operation could be performed (table exists and online; table state + // is ENABLED). Once it is done, we are good to release the latch and the client can + // start asynchronously wait for the operation. + // + // Note: the member syncLatch could be null if we are in failover or recovery scenario. + // This is ok for backward compatible, as 1.0 client would not able to peek at procedure. 
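The compatibility comment above is easier to follow from the caller's side: the master submits the procedure together with a latch and returns to a pre-procedure client only after prepareDisable() has released it, so those clients still see a synchronous TableNotFoundException or TableNotEnabledException. A rough sketch only, assuming the ProcedureExecutor and ProcedurePrepareLatch APIs introduced alongside these procedures:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
    import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
    import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
    import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

    public final class DisableTableSubmitSketch {
      static long disable(ProcedureExecutor<MasterProcedureEnv> procExec, TableName tableName)
          throws Exception {
        // The latch is released in prepareDisable(), on success or failure.
        ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
        long procId = procExec.submitProcedure(
            new DisableTableProcedure(procExec.getEnvironment(), tableName, false, latch));
        latch.await();
        return procId;
      }
    }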
+ this.syncLatch = syncLatch; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, final DisableTableState state) { + if (isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + + try { + switch (state) { + case DISABLE_TABLE_PREPARE: + if (prepareDisable(env)) { + setNextState(DisableTableState.DISABLE_TABLE_PRE_OPERATION); + } else { + assert isFailed() : "disable should have an exception here"; + return Flow.NO_MORE_STATE; + } + break; + case DISABLE_TABLE_PRE_OPERATION: + preDisable(env, state); + setNextState(DisableTableState.DISABLE_TABLE_SET_DISABLING_TABLE_STATE); + break; + case DISABLE_TABLE_SET_DISABLING_TABLE_STATE: + setTableStateToDisabling(env, tableName); + setNextState(DisableTableState.DISABLE_TABLE_MARK_REGIONS_OFFLINE); + break; + case DISABLE_TABLE_MARK_REGIONS_OFFLINE: + if (markRegionsOffline(env, tableName, true) == + MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL) { + setNextState(DisableTableState.DISABLE_TABLE_SET_DISABLED_TABLE_STATE); + } else { + LOG.trace("Retrying later to disable the missing regions"); + } + break; + case DISABLE_TABLE_SET_DISABLED_TABLE_STATE: + setTableStateToDisabled(env, tableName); + setNextState(DisableTableState.DISABLE_TABLE_POST_OPERATION); + break; + case DISABLE_TABLE_POST_OPERATION: + postDisable(env, state); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (InterruptedException|IOException e) { + LOG.warn("Retriable error trying to disable table=" + tableName + " state=" + state, e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final DisableTableState state) + throws IOException { + if (state == DisableTableState.DISABLE_TABLE_PREPARE) { + // nothing to rollback, prepare-disable is just table-state checks. + // We can fail if the table does not exist or is not disabled. + ProcedurePrepareLatch.releaseLatch(syncLatch, this); + return; + } + + // The delete doesn't have a rollback. The execution will succeed, at some point. 
+ throw new UnsupportedOperationException("unhandled state=" + state); + } + + @Override + protected DisableTableState getState(final int stateId) { + return DisableTableState.valueOf(stateId); + } + + @Override + protected int getStateId(final DisableTableState state) { + return state.getNumber(); + } + + @Override + protected DisableTableState getInitialState() { + return DisableTableState.DISABLE_TABLE_PREPARE; + } + + @Override + protected void setNextState(final DisableTableState state) { + if (aborted.get()) { + setAbortFailure("disable-table", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (!env.isInitialized()) return false; + return env.getProcedureQueue().tryAcquireTableWrite( + tableName, + EventType.C_M_DISABLE_TABLE.toString()); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableWrite(tableName); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + MasterProcedureProtos.DisableTableStateData.Builder disableTableMsg = + MasterProcedureProtos.DisableTableStateData.newBuilder() + .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user)) + .setTableName(ProtobufUtil.toProtoTableName(tableName)) + .setSkipTableStateCheck(skipTableStateCheck); + + disableTableMsg.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + MasterProcedureProtos.DisableTableStateData disableTableMsg = + MasterProcedureProtos.DisableTableStateData.parseDelimitedFrom(stream); + user = MasterProcedureUtil.toUserInfo(disableTableMsg.getUserInfo()); + tableName = ProtobufUtil.toTableName(disableTableMsg.getTableName()); + skipTableStateCheck = disableTableMsg.getSkipTableStateCheck(); + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (table="); + sb.append(tableName); + sb.append(") user="); + sb.append(user); + } + + @Override + public TableName getTableName() { + return tableName; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.DISABLE; + } + + /** + * Action before any real action of disabling table. Set the exception in the procedure instead + * of throwing it. This approach is to deal with backward compatible with 1.0. + * @param env MasterProcedureEnv + * @throws IOException + */ + private boolean prepareDisable(final MasterProcedureEnv env) throws IOException { + boolean canTableBeDisabled = true; + if (tableName.equals(TableName.META_TABLE_NAME)) { + setFailure("master-disable-table", new ConstraintException("Cannot disable catalog table")); + canTableBeDisabled = false; + } else if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) { + setFailure("master-disable-table", new TableNotFoundException(tableName)); + canTableBeDisabled = false; + } else if (!skipTableStateCheck) { + // There could be multiple client requests trying to disable or enable + // the table at the same time. Ensure only the first request is honored + // After that, no other requests can be accepted until the table reaches + // DISABLED or ENABLED. 
+ // + // Note: in 1.0 release, we called TableStateManager.setTableStateIfInStates() to set + // the state to DISABLING from ENABLED. The implementation was done before table lock + // was implemented. With table lock, there is no need to set the state here (it will + // set the state later on). A quick state check should be enough for us to move forward. + TableStateManager tsm = + env.getMasterServices().getAssignmentManager().getTableStateManager(); + if (!tsm.getTableState(tableName).equals(TableState.State.ENABLED)) { + LOG.info("Table " + tableName + " isn't enabled; skipping disable"); + setFailure("master-disable-table", new TableNotEnabledException(tableName)); + canTableBeDisabled = false; + } + } + + // We are done the check. Future actions in this procedure could be done asynchronously. + ProcedurePrepareLatch.releaseLatch(syncLatch, this); + + return canTableBeDisabled; + } + + /** + * Action before disabling table. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + protected void preDisable(final MasterProcedureEnv env, final DisableTableState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * Mark table state to Disabling + * @param env MasterProcedureEnv + * @throws IOException + */ + protected static void setTableStateToDisabling( + final MasterProcedureEnv env, + final TableName tableName) throws IOException { + // Set table disabling flag up in zk. + env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState( + tableName, + TableState.State.DISABLING); + } + + /** + * Mark regions of the table offline with retries + * @param env MasterProcedureEnv + * @param tableName the target table + * @param retryRequired whether to retry if the first run failed + * @return whether the operation is fully completed or being interrupted. + * @throws IOException + */ + protected static MarkRegionOfflineOpResult markRegionsOffline( + final MasterProcedureEnv env, + final TableName tableName, + final Boolean retryRequired) throws IOException { + // Dev consideration: add a config to control max number of retry. For now, it is hard coded. + int maxTry = (retryRequired ? 10 : 1); + MarkRegionOfflineOpResult operationResult = + MarkRegionOfflineOpResult.BULK_ASSIGN_REGIONS_FAILED; + do { + try { + operationResult = markRegionsOffline(env, tableName); + if (operationResult == MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL) { + break; + } + maxTry--; + } catch (Exception e) { + LOG.warn("Received exception while marking regions online. tries left: " + maxTry, e); + maxTry--; + if (maxTry > 0) { + continue; // we still have some retry left, try again. + } + throw e; + } + } while (maxTry > 0); + + if (operationResult != MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL) { + LOG.warn("Some or all regions of the Table '" + tableName + "' were still online"); + } + + return operationResult; + } + + /** + * Mark regions of the table offline + * @param env MasterProcedureEnv + * @param tableName the target table + * @return whether the operation is fully completed or being interrupted. + * @throws IOException + */ + private static MarkRegionOfflineOpResult markRegionsOffline( + final MasterProcedureEnv env, + final TableName tableName) throws IOException { + // Get list of online regions that are of this table. Regions that are + // already closed will not be included in this list; i.e. 
the returned + // list is not ALL regions in a table, its all online regions according + // to the in-memory state on this master. + MarkRegionOfflineOpResult operationResult = + MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_SUCCESSFUL; + final List regions = + env.getMasterServices().getAssignmentManager().getRegionStates() + .getRegionsOfTable(tableName); + if (regions.size() > 0) { + LOG.info("Offlining " + regions.size() + " regions."); + + BulkDisabler bd = new BulkDisabler(env, tableName, regions); + try { + if (!bd.bulkAssign()) { + operationResult = MarkRegionOfflineOpResult.BULK_ASSIGN_REGIONS_FAILED; + } + } catch (InterruptedException e) { + LOG.warn("Disable was interrupted"); + // Preserve the interrupt. + Thread.currentThread().interrupt(); + operationResult = MarkRegionOfflineOpResult.MARK_ALL_REGIONS_OFFLINE_INTERRUPTED; + } + } + return operationResult; + } + + /** + * Mark table state to Disabled + * @param env MasterProcedureEnv + * @throws IOException + */ + protected static void setTableStateToDisabled( + final MasterProcedureEnv env, + final TableName tableName) throws IOException { + // Flip the table to disabled + env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState( + tableName, + TableState.State.DISABLED); + LOG.info("Disabled table, " + tableName + ", is completed."); + } + + /** + * Action after disabling table. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + protected void postDisable(final MasterProcedureEnv env, final DisableTableState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @return traceEnabled + */ + private Boolean isTraceEnabled() { + if (traceEnabled == null) { + traceEnabled = LOG.isTraceEnabled(); + } + return traceEnabled; + } + + /** + * Coprocessor Action. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void runCoprocessorAction(final MasterProcedureEnv env, final DisableTableState state) + throws IOException, InterruptedException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + switch (state) { + case DISABLE_TABLE_PRE_OPERATION: + cpHost.preDisableTableHandler(tableName); + break; + case DISABLE_TABLE_POST_OPERATION: + cpHost.postDisableTableHandler(tableName); + break; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + return null; + } + }); + } + } + + /** + * Run bulk disable. 
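+   * Sketch of the behavior, as implemented below: every region of the table that is not
+   * already in transition (except those stuck in FAILED_CLOSE) is unassigned in parallel,
+   * then waitUntilDone() polls the region states every "hbase.master.event.waiting.time"
+   * ms (default 1000) until the table has no online regions left or the timeout expires.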
+ */ + private static class BulkDisabler extends BulkAssigner { + private final AssignmentManager assignmentManager; + private final List regions; + private final TableName tableName; + private final int waitingTimeForEvents; + + public BulkDisabler(final MasterProcedureEnv env, final TableName tableName, + final List regions) { + super(env.getMasterServices()); + this.assignmentManager = env.getMasterServices().getAssignmentManager(); + this.tableName = tableName; + this.regions = regions; + this.waitingTimeForEvents = + env.getMasterServices().getConfiguration() + .getInt("hbase.master.event.waiting.time", 1000); + } + + @Override + protected void populatePool(ExecutorService pool) { + RegionStates regionStates = assignmentManager.getRegionStates(); + for (final HRegionInfo region : regions) { + if (regionStates.isRegionInTransition(region) + && !regionStates.isRegionInState(region, RegionState.State.FAILED_CLOSE)) { + continue; + } + pool.execute(Trace.wrap("DisableTableHandler.BulkDisabler", new Runnable() { + @Override + public void run() { + assignmentManager.unassign(region); + } + })); + } + } + + @Override + protected boolean waitUntilDone(long timeout) throws InterruptedException { + long startTime = EnvironmentEdgeManager.currentTime(); + long remaining = timeout; + List regions = null; + long lastLogTime = startTime; + while (!server.isStopped() && remaining > 0) { + Thread.sleep(waitingTimeForEvents); + regions = assignmentManager.getRegionStates().getRegionsOfTable(tableName); + long now = EnvironmentEdgeManager.currentTime(); + // Don't log more than once every ten seconds. Its obnoxious. And only log table regions + // if we are waiting a while for them to go down... + if (LOG.isDebugEnabled() && ((now - lastLogTime) > 10000)) { + lastLogTime = now; + LOG.debug("Disable waiting until done; " + remaining + " ms remaining; " + regions); + } + if (regions.isEmpty()) break; + remaining = timeout - (now - startTime); + } + return regions != null && regions.isEmpty(); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java new file mode 100644 index 00000000000..aefb0b1c0cc --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java @@ -0,0 +1,582 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.security.PrivilegedExceptionAction; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotDisabledException; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.executor.EventType; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.BulkAssigner; +import org.apache.hadoop.hbase.master.GeneralBulkAssigner; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionStates; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.master.TableStateManager; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; +import org.apache.hadoop.security.UserGroupInformation; + +@InterfaceAudience.Private +public class EnableTableProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(EnableTableProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + + // This is for back compatible with 1.0 asynchronized operations. + private final ProcedurePrepareLatch syncLatch; + + private TableName tableName; + private boolean skipTableStateCheck; + private UserGroupInformation user; + + private Boolean traceEnabled = null; + + public EnableTableProcedure() { + syncLatch = null; + } + + /** + * Constructor + * @param env MasterProcedureEnv + * @param tableName the table to operate on + * @param skipTableStateCheck whether to check table state + * @throws IOException + */ + public EnableTableProcedure( + final MasterProcedureEnv env, + final TableName tableName, + final boolean skipTableStateCheck) throws IOException { + this(env, tableName, skipTableStateCheck, null); + } + + /** + * Constructor + * @param env MasterProcedureEnv + * @throws IOException + * @param tableName the table to operate on + * @param skipTableStateCheck whether to check table state + */ + public EnableTableProcedure( + final MasterProcedureEnv env, + final TableName tableName, + final boolean skipTableStateCheck, + final ProcedurePrepareLatch syncLatch) throws IOException { + this.tableName = tableName; + this.skipTableStateCheck = skipTableStateCheck; + this.user = env.getRequestUser().getUGI(); + + // Compatible with 1.0: We use latch to make sure that this procedure implementation is + // compatible with 1.0 asynchronized operations. 
We need to lock the table and check + // whether the Enable operation could be performed (table exists and offline; table state + // is DISABLED). Once it is done, we are good to release the latch and the client can + // start asynchronously wait for the operation. + // + // Note: the member syncLatch could be null if we are in failover or recovery scenario. + // This is ok for backward compatible, as 1.0 client would not able to peek at procedure. + this.syncLatch = syncLatch; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, final EnableTableState state) { + if (isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + + try { + switch (state) { + case ENABLE_TABLE_PREPARE: + if (prepareEnable(env)) { + setNextState(EnableTableState.ENABLE_TABLE_PRE_OPERATION); + } else { + assert isFailed() : "enable should have an exception here"; + return Flow.NO_MORE_STATE; + } + break; + case ENABLE_TABLE_PRE_OPERATION: + preEnable(env, state); + setNextState(EnableTableState.ENABLE_TABLE_SET_ENABLING_TABLE_STATE); + break; + case ENABLE_TABLE_SET_ENABLING_TABLE_STATE: + setTableStateToEnabling(env, tableName); + setNextState(EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE); + break; + case ENABLE_TABLE_MARK_REGIONS_ONLINE: + markRegionsOnline(env, tableName, true); + setNextState(EnableTableState.ENABLE_TABLE_SET_ENABLED_TABLE_STATE); + break; + case ENABLE_TABLE_SET_ENABLED_TABLE_STATE: + setTableStateToEnabled(env, tableName); + setNextState(EnableTableState.ENABLE_TABLE_POST_OPERATION); + break; + case ENABLE_TABLE_POST_OPERATION: + postEnable(env, state); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (InterruptedException|IOException e) { + LOG.error("Error trying to enable table=" + tableName + " state=" + state, e); + setFailure("master-enable-table", e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final EnableTableState state) + throws IOException { + if (isTraceEnabled()) { + LOG.trace(this + " rollback state=" + state); + } + try { + switch (state) { + case ENABLE_TABLE_POST_OPERATION: + // TODO-MAYBE: call the coprocessor event to undo (eg. DisableTableProcedure.preDisable())? + break; + case ENABLE_TABLE_SET_ENABLED_TABLE_STATE: + DisableTableProcedure.setTableStateToDisabling(env, tableName); + break; + case ENABLE_TABLE_MARK_REGIONS_ONLINE: + markRegionsOfflineDuringRecovery(env); + break; + case ENABLE_TABLE_SET_ENABLING_TABLE_STATE: + DisableTableProcedure.setTableStateToDisabled(env, tableName); + break; + case ENABLE_TABLE_PRE_OPERATION: + // TODO-MAYBE: call the coprocessor event to undo (eg. DisableTableProcedure.postDisable())? + break; + case ENABLE_TABLE_PREPARE: + // Nothing to undo for this state. + // We do need to count down the latch count so that we don't stuck. + ProcedurePrepareLatch.releaseLatch(syncLatch, this); + break; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException e) { + // This will be retried. Unless there is a bug in the code, + // this should be just a "temporary error" (e.g. 
network down) + LOG.warn("Failed enable table rollback attempt step=" + state + " table=" + tableName, e); + throw e; + } + } + + @Override + protected EnableTableState getState(final int stateId) { + return EnableTableState.valueOf(stateId); + } + + @Override + protected int getStateId(final EnableTableState state) { + return state.getNumber(); + } + + @Override + protected EnableTableState getInitialState() { + return EnableTableState.ENABLE_TABLE_PREPARE; + } + + @Override + protected void setNextState(final EnableTableState state) { + if (aborted.get()) { + setAbortFailure("Enable-table", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (!env.isInitialized()) return false; + return env.getProcedureQueue().tryAcquireTableWrite( + tableName, + EventType.C_M_ENABLE_TABLE.toString()); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableWrite(tableName); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + MasterProcedureProtos.EnableTableStateData.Builder enableTableMsg = + MasterProcedureProtos.EnableTableStateData.newBuilder() + .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user)) + .setTableName(ProtobufUtil.toProtoTableName(tableName)) + .setSkipTableStateCheck(skipTableStateCheck); + + enableTableMsg.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + MasterProcedureProtos.EnableTableStateData enableTableMsg = + MasterProcedureProtos.EnableTableStateData.parseDelimitedFrom(stream); + user = MasterProcedureUtil.toUserInfo(enableTableMsg.getUserInfo()); + tableName = ProtobufUtil.toTableName(enableTableMsg.getTableName()); + skipTableStateCheck = enableTableMsg.getSkipTableStateCheck(); + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (table="); + sb.append(tableName); + sb.append(") user="); + sb.append(user); + } + + @Override + public TableName getTableName() { + return tableName; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.ENABLE; + } + + + /** + * Action before any real action of enabling table. Set the exception in the procedure instead + * of throwing it. This approach is to deal with backward compatible with 1.0. + * @param env MasterProcedureEnv + * @return whether the table passes the necessary checks + * @throws IOException + */ + private boolean prepareEnable(final MasterProcedureEnv env) throws IOException { + boolean canTableBeEnabled = true; + + // Check whether table exists + if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) { + setFailure("master-enable-table", new TableNotFoundException(tableName)); + canTableBeEnabled = false; + } else if (!skipTableStateCheck) { + // There could be multiple client requests trying to disable or enable + // the table at the same time. Ensure only the first request is honored + // After that, no other requests can be accepted until the table reaches + // DISABLED or ENABLED. 
+ // + // Note: in 1.0 release, we called TableStateManager.setTableStateIfInStates() to set + // the state to ENABLING from DISABLED. The implementation was done before table lock + // was implemented. With table lock, there is no need to set the state here (it will + // set the state later on). A quick state check should be enough for us to move forward. + TableStateManager tsm = env.getMasterServices().getAssignmentManager().getTableStateManager(); + if (!tsm.getTableState(tableName).equals(TableState.State.DISABLED)) { + LOG.info("Table " + tableName + " isn't disabled; skipping enable"); + setFailure("master-enable-table", new TableNotDisabledException(this.tableName)); + canTableBeEnabled = false; + } + } + + // We are done the check. Future actions in this procedure could be done asynchronously. + ProcedurePrepareLatch.releaseLatch(syncLatch, this); + + return canTableBeEnabled; + } + + /** + * Action before enabling table. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void preEnable(final MasterProcedureEnv env, final EnableTableState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * Mark table state to Enabling + * @param env MasterProcedureEnv + * @param tableName the target table + * @throws IOException + */ + protected static void setTableStateToEnabling( + final MasterProcedureEnv env, + final TableName tableName) throws IOException { + // Set table disabling flag up in zk. + LOG.info("Attempting to enable the table " + tableName); + env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState( + tableName, + TableState.State.ENABLING); + } + + /** + * Mark offline regions of the table online with retry + * @param env MasterProcedureEnv + * @param tableName the target table + * @param retryRequired whether to retry if the first run failed + * @throws IOException + */ + protected static void markRegionsOnline( + final MasterProcedureEnv env, + final TableName tableName, + final Boolean retryRequired) throws IOException { + // This is best effort approach to make all regions of a table online. If we fail to do + // that, it is ok that the table has some offline regions; user can fix it manually. + + // Dev consideration: add a config to control max number of retry. For now, it is hard coded. + int maxTry = (retryRequired ? 10 : 1); + boolean done = false; + + do { + try { + done = markRegionsOnline(env, tableName); + if (done) { + break; + } + maxTry--; + } catch (Exception e) { + LOG.warn("Received exception while marking regions online. tries left: " + maxTry, e); + maxTry--; + if (maxTry > 0) { + continue; // we still have some retry left, try again. + } + throw e; + } + } while (maxTry > 0); + + if (!done) { + LOG.warn("Some or all regions of the Table '" + tableName + "' were offline"); + } + } + + /** + * Mark offline regions of the table online + * @param env MasterProcedureEnv + * @param tableName the target table + * @return whether the operation is fully completed or being interrupted. 
+ * @throws IOException + */ + private static boolean markRegionsOnline(final MasterProcedureEnv env, final TableName tableName) + throws IOException { + final AssignmentManager assignmentManager = env.getMasterServices().getAssignmentManager(); + final MasterServices masterServices = env.getMasterServices(); + final ServerManager serverManager = masterServices.getServerManager(); + boolean done = false; + // Get the regions of this table. We're done when all listed + // tables are onlined. + List> tableRegionsAndLocations; + + if (TableName.META_TABLE_NAME.equals(tableName)) { + tableRegionsAndLocations = + new MetaTableLocator().getMetaRegionsAndLocations(masterServices.getZooKeeper()); + } else { + tableRegionsAndLocations = + MetaTableAccessor.getTableRegionsAndLocations(masterServices.getConnection(), tableName); + } + + int countOfRegionsInTable = tableRegionsAndLocations.size(); + Map regionsToAssign = + regionsToAssignWithServerName(env, tableRegionsAndLocations); + + // need to potentially create some regions for the replicas + List unrecordedReplicas = + AssignmentManager.replicaRegionsNotRecordedInMeta(new HashSet( + regionsToAssign.keySet()), masterServices); + Map> srvToUnassignedRegs = + assignmentManager.getBalancer().roundRobinAssignment(unrecordedReplicas, + serverManager.getOnlineServersList()); + if (srvToUnassignedRegs != null) { + for (Map.Entry> entry : srvToUnassignedRegs.entrySet()) { + for (HRegionInfo h : entry.getValue()) { + regionsToAssign.put(h, entry.getKey()); + } + } + } + + int offlineRegionsCount = regionsToAssign.size(); + + LOG.info("Table '" + tableName + "' has " + countOfRegionsInTable + " regions, of which " + + offlineRegionsCount + " are offline."); + if (offlineRegionsCount == 0) { + return true; + } + + List onlineServers = serverManager.createDestinationServersList(); + Map> bulkPlan = + env.getMasterServices().getAssignmentManager().getBalancer() + .retainAssignment(regionsToAssign, onlineServers); + if (bulkPlan != null) { + LOG.info("Bulk assigning " + offlineRegionsCount + " region(s) across " + bulkPlan.size() + + " server(s), retainAssignment=true"); + + BulkAssigner ba = new GeneralBulkAssigner(masterServices, bulkPlan, assignmentManager, true); + try { + if (ba.bulkAssign()) { + done = true; + } + } catch (InterruptedException e) { + LOG.warn("Enable operation was interrupted when enabling table '" + tableName + "'"); + // Preserve the interrupt. + Thread.currentThread().interrupt(); + } + } else { + LOG.info("Balancer was unable to find suitable servers for table " + tableName + + ", leaving unassigned"); + } + return done; + } + + /** + * Mark regions of the table offline during recovery + * @param env MasterProcedureEnv + */ + private void markRegionsOfflineDuringRecovery(final MasterProcedureEnv env) { + try { + // This is a best effort attempt. We will move on even it does not succeed. We will retry + // several times until we giving up. + DisableTableProcedure.markRegionsOffline(env, tableName, true); + } catch (Exception e) { + LOG.debug("Failed to offline all regions of table " + tableName + ". 
Ignoring", e); + } + } + + /** + * Mark table state to Enabled + * @param env MasterProcedureEnv + * @throws IOException + */ + protected static void setTableStateToEnabled( + final MasterProcedureEnv env, + final TableName tableName) throws IOException { + // Flip the table to Enabled + env.getMasterServices().getAssignmentManager().getTableStateManager().setTableState( + tableName, + TableState.State.ENABLED); + LOG.info("Table '" + tableName + "' was successfully enabled."); + } + + /** + * Action after enabling table. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void postEnable(final MasterProcedureEnv env, final EnableTableState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @return traceEnabled + */ + private Boolean isTraceEnabled() { + if (traceEnabled == null) { + traceEnabled = LOG.isTraceEnabled(); + } + return traceEnabled; + } + + /** + * @param regionsInMeta + * @return List of regions neither in transition nor assigned. + * @throws IOException + */ + private static Map regionsToAssignWithServerName( + final MasterProcedureEnv env, + final List> regionsInMeta) throws IOException { + Map regionsToAssign = + new HashMap(regionsInMeta.size()); + RegionStates regionStates = env.getMasterServices().getAssignmentManager().getRegionStates(); + for (Pair regionLocation : regionsInMeta) { + HRegionInfo hri = regionLocation.getFirst(); + ServerName sn = regionLocation.getSecond(); + if (regionStates.isRegionOffline(hri)) { + regionsToAssign.put(hri, sn); + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Skipping assign for the region " + hri + " during enable table " + + hri.getTable() + " because its already in tranition or assigned."); + } + } + } + return regionsToAssign; + } + + /** + * Coprocessor Action. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void runCoprocessorAction(final MasterProcedureEnv env, final EnableTableState state) + throws IOException, InterruptedException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + switch (state) { + case ENABLE_TABLE_PRE_OPERATION: + cpHost.preEnableTableHandler(getTableName()); + break; + case ENABLE_TABLE_POST_OPERATION: + cpHost.postEnableTableHandler(getTableName()); + break; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + return null; + } + }); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java new file mode 100644 index 00000000000..c6ff1b6e3e8 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java @@ -0,0 +1,167 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.NavigableMap; +import java.util.TreeMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotDisabledException; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.RegionLocator; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.BulkReOpen; +import org.apache.hadoop.hbase.master.MasterFileSystem; +import org.apache.hadoop.hbase.util.Bytes; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +/** + * Helper class for schema change procedures + */ +@InterfaceAudience.Private +public final class MasterDDLOperationHelper { + private static final Log LOG = LogFactory.getLog(MasterDDLOperationHelper.class); + + private MasterDDLOperationHelper() {} + + /** + * Check whether online schema change is allowed from config + **/ + public static boolean isOnlineSchemaChangeAllowed(final MasterProcedureEnv env) { + return env.getMasterServices().getConfiguration() + .getBoolean("hbase.online.schema.update.enable", false); + } + + /** + * Check whether a table is modifiable - exists and either offline or online with config set + * @param env MasterProcedureEnv + * @param tableName name of the table + * @throws IOException + */ + public static void checkTableModifiable(final MasterProcedureEnv env, final TableName tableName) + throws IOException { + // Checks whether the table exists + if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) { + throw new TableNotFoundException(tableName); + } + + // We only execute this procedure with table online if online schema change config is set. 
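+    // Illustrative note (the property name is taken from the check below; the XML is the
+    // usual hbase-site.xml form): operators opt in to online schema change with
+    //   <property>
+    //     <name>hbase.online.schema.update.enable</name>
+    //     <value>true</value>
+    //   </property>
+    // otherwise the table must be DISABLED before this procedure may modify it.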
+ if (!env.getMasterServices().getAssignmentManager().getTableStateManager() + .isTableState(tableName, TableState.State.DISABLED) + && !MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) { + throw new TableNotDisabledException(tableName); + } + } + + /** + * Remove the column family from the file system + **/ + public static void deleteColumnFamilyFromFileSystem( + final MasterProcedureEnv env, + final TableName tableName, + List regionInfoList, + final byte[] familyName) throws IOException { + final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); + if (LOG.isDebugEnabled()) { + LOG.debug("Removing family=" + Bytes.toString(familyName) + " from table=" + tableName); + } + if (regionInfoList == null) { + regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, tableName); + } + for (HRegionInfo hri : regionInfoList) { + // Delete the family directory in FS for all the regions one by one + mfs.deleteFamilyFromFS(hri, familyName); + } + } + + /** + * Reopen all regions from a table after a schema change operation. + **/ + public static boolean reOpenAllRegions( + final MasterProcedureEnv env, + final TableName tableName, + final List regionInfoList) throws IOException { + boolean done = false; + LOG.info("Bucketing regions by region server..."); + List regionLocations = null; + Connection connection = env.getMasterServices().getConnection(); + try (RegionLocator locator = connection.getRegionLocator(tableName)) { + regionLocations = locator.getAllRegionLocations(); + } + // Convert List to Map. + NavigableMap hri2Sn = new TreeMap(); + for (HRegionLocation location : regionLocations) { + hri2Sn.put(location.getRegionInfo(), location.getServerName()); + } + TreeMap> serverToRegions = Maps.newTreeMap(); + List reRegions = new ArrayList(); + for (HRegionInfo hri : regionInfoList) { + ServerName sn = hri2Sn.get(hri); + // Skip the offlined split parent region + // See HBASE-4578 for more information. + if (null == sn) { + LOG.info("Skip " + hri); + continue; + } + if (!serverToRegions.containsKey(sn)) { + LinkedList hriList = Lists.newLinkedList(); + serverToRegions.put(sn, hriList); + } + reRegions.add(hri); + serverToRegions.get(sn).add(hri); + } + + LOG.info("Reopening " + reRegions.size() + " regions on " + serverToRegions.size() + + " region servers."); + AssignmentManager am = env.getMasterServices().getAssignmentManager(); + am.setRegionsToReopen(reRegions); + BulkReOpen bulkReopen = new BulkReOpen(env.getMasterServices(), serverToRegions, am); + while (true) { + try { + if (bulkReopen.bulkReOpen()) { + done = true; + break; + } else { + LOG.warn("Timeout before reopening all regions"); + } + } catch (InterruptedException e) { + LOG.warn("Reopen was interrupted"); + // Preserve the interrupt. + Thread.currentThread().interrupt(); + break; + } + } + return done; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureConstants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureConstants.java new file mode 100644 index 00000000000..90ed4eeb769 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureConstants.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +@InterfaceAudience.Private +public final class MasterProcedureConstants { + private MasterProcedureConstants() {} + + public static final String MASTER_PROCEDURE_LOGDIR = "MasterProcWALs"; + + public static final String MASTER_PROCEDURE_THREADS = "hbase.master.procedure.threads"; + public static final int DEFAULT_MIN_MASTER_PROCEDURE_THREADS = 4; +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java new file mode 100644 index 00000000000..0a33cd4356c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java @@ -0,0 +1,123 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; +import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.UserProvider; +import org.apache.hadoop.hbase.util.CancelableProgressable; +import org.apache.hadoop.hbase.util.FSUtils; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class MasterProcedureEnv { + private static final Log LOG = LogFactory.getLog(MasterProcedureEnv.class); + + @InterfaceAudience.Private + public static class WALStoreLeaseRecovery implements WALProcedureStore.LeaseRecovery { + private final HMaster master; + + public WALStoreLeaseRecovery(final HMaster master) { + this.master = master; + } + + @Override + public void recoverFileLease(final FileSystem fs, final Path path) throws IOException { + final Configuration conf = master.getConfiguration(); + final FSUtils fsUtils = FSUtils.getInstance(fs, conf); + fsUtils.recoverFileLease(fs, path, conf, new CancelableProgressable() { + @Override + public boolean progress() { + LOG.debug("Recover Procedure Store log lease: " + path); + return master.isActiveMaster(); + } + }); + } + } + + @InterfaceAudience.Private + public static class MasterProcedureStoreListener + implements ProcedureStore.ProcedureStoreListener { + private final HMaster master; + + public MasterProcedureStoreListener(final HMaster master) { + this.master = master; + } + + @Override + public void abortProcess() { + master.abort("The Procedure Store lost the lease"); + } + } + + private final MasterProcedureQueue procQueue; + private final MasterServices master; + + public MasterProcedureEnv(final MasterServices master) { + this.master = master; + this.procQueue = new MasterProcedureQueue(master.getConfiguration(), + master.getTableLockManager()); + } + + public User getRequestUser() throws IOException { + User user = RpcServer.getRequestUser(); + if (user == null) { + user = UserProvider.instantiate(getMasterConfiguration()).getCurrent(); + } + return user; + } + + public MasterServices getMasterServices() { + return master; + } + + public Configuration getMasterConfiguration() { + return master.getConfiguration(); + } + + public MasterCoprocessorHost getMasterCoprocessorHost() { + return master.getMasterCoprocessorHost(); + } + + public MasterProcedureQueue getProcedureQueue() { + return procQueue; + } + + public boolean isRunning() { + return master.getMasterProcedureExecutor().isRunning(); + } + + public boolean isInitialized() { + return master.isInitialized(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureQueue.java new file mode 100644 index 00000000000..0dd0c3df6fc --- /dev/null +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureQueue.java
@@ -0,0 +1,448 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.procedure;
+
+import java.io.IOException;
+import java.util.ArrayDeque;
+import java.util.Deque;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureFairRunQueues;
+import org.apache.hadoop.hbase.procedure2.ProcedureRunnableSet;
+import org.apache.hadoop.hbase.master.TableLockManager;
+import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
+import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType;
+
+/**
+ * ProcedureRunnableSet for the Master Procedures.
+ * This RunnableSet tries to provide the ProcedureExecutor with procedures
+ * that can be executed without having to wait on a lock.
+ * Most of the master operations can be executed concurrently, if they
+ * are operating on different tables (e.g. two create-table operations can be
+ * performed at the same time, assuming they target different tables A and B).
+ *
+ * Each procedure should implement an interface providing information for this queue;
+ * for example, table-related procedures should implement TableProcedureInterface.
+ * Each procedure will be pushed into its own queue, and based on the operation type
+ * we may take smarter decisions, e.g. we can abort all the operations preceding
+ * a delete table, or similar.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class MasterProcedureQueue implements ProcedureRunnableSet {
+  private static final Log LOG = LogFactory.getLog(MasterProcedureQueue.class);
+
+  private final ProcedureFairRunQueues<TableName, RunQueue> fairq;
+  private final ReentrantLock lock = new ReentrantLock();
+  private final Condition waitCond = lock.newCondition();
+  private final TableLockManager lockManager;
+
+  private final int metaTablePriority;
+  private final int userTablePriority;
+  private final int sysTablePriority;
+
+  private int queueSize;
+
+  public MasterProcedureQueue(final Configuration conf, final TableLockManager lockManager) {
+    this.fairq = new ProcedureFairRunQueues<TableName, RunQueue>(1);
+    this.lockManager = lockManager;
+
+    // TODO: should this be part of the HTD?
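+    // Illustrative note (assumption about ProcedureFairRunQueues semantics, not stated in
+    // this patch): a larger priority gives a table's run queue a larger share when the
+    // fair queues rotate, so with the defaults below DDL on hbase:meta (3) tends to be
+    // polled ahead of system tables (2), which in turn are favored over user tables (1).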
+ metaTablePriority = conf.getInt("hbase.master.procedure.queue.meta.table.priority", 3); + sysTablePriority = conf.getInt("hbase.master.procedure.queue.system.table.priority", 2); + userTablePriority = conf.getInt("hbase.master.procedure.queue.user.table.priority", 1); + } + + @Override + public void addFront(final Procedure proc) { + lock.lock(); + try { + getRunQueueOrCreate(proc).addFront(proc); + queueSize++; + waitCond.signal(); + } finally { + lock.unlock(); + } + } + + @Override + public void addBack(final Procedure proc) { + lock.lock(); + try { + getRunQueueOrCreate(proc).addBack(proc); + queueSize++; + waitCond.signal(); + } finally { + lock.unlock(); + } + } + + @Override + public void yield(final Procedure proc) { + addFront(proc); + } + + @Override + @edu.umd.cs.findbugs.annotations.SuppressWarnings("WA_AWAIT_NOT_IN_LOOP") + public Long poll() { + lock.lock(); + try { + if (queueSize == 0) { + waitCond.await(); + if (queueSize == 0) { + return null; + } + } + + RunQueue queue = fairq.poll(); + if (queue != null && queue.isAvailable()) { + queueSize--; + return queue.poll(); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return null; + } finally { + lock.unlock(); + } + return null; + } + + @Override + public void signalAll() { + lock.lock(); + try { + waitCond.signalAll(); + } finally { + lock.unlock(); + } + } + + @Override + public void clear() { + lock.lock(); + try { + fairq.clear(); + queueSize = 0; + } finally { + lock.unlock(); + } + } + + @Override + public int size() { + lock.lock(); + try { + return queueSize; + } finally { + lock.unlock(); + } + } + + @Override + public String toString() { + lock.lock(); + try { + return "MasterProcedureQueue size=" + queueSize + ": " + fairq; + } finally { + lock.unlock(); + } + } + + @Override + public void completionCleanup(Procedure proc) { + if (proc instanceof TableProcedureInterface) { + TableProcedureInterface iProcTable = (TableProcedureInterface)proc; + boolean tableDeleted; + if (proc.hasException()) { + IOException procEx = proc.getException().unwrapRemoteException(); + if (iProcTable.getTableOperationType() == TableOperationType.CREATE) { + // create failed because the table already exist + tableDeleted = !(procEx instanceof TableExistsException); + } else { + // the operation failed because the table does not exist + tableDeleted = (procEx instanceof TableNotFoundException); + } + } else { + // the table was deleted + tableDeleted = (iProcTable.getTableOperationType() == TableOperationType.DELETE); + } + if (tableDeleted) { + markTableAsDeleted(iProcTable.getTableName()); + } + } + } + + private RunQueue getRunQueueOrCreate(final Procedure proc) { + if (proc instanceof TableProcedureInterface) { + final TableName table = ((TableProcedureInterface)proc).getTableName(); + return getRunQueueOrCreate(table); + } + // TODO: at the moment we only have Table procedures + // if you are implementing a non-table procedure, you have two option create + // a group for all the non-table procedures or try to find a key for your + // non-table procedure and implement something similar to the TableRunQueue. 
+ throw new UnsupportedOperationException("RQs for non-table procedures are not implemented yet"); + } + + private TableRunQueue getRunQueueOrCreate(final TableName table) { + final TableRunQueue queue = getRunQueue(table); + if (queue != null) return queue; + return (TableRunQueue)fairq.add(table, createTableRunQueue(table)); + } + + private TableRunQueue createTableRunQueue(final TableName table) { + int priority = userTablePriority; + if (table.equals(TableName.META_TABLE_NAME)) { + priority = metaTablePriority; + } else if (table.isSystemTable()) { + priority = sysTablePriority; + } + return new TableRunQueue(priority); + } + + private TableRunQueue getRunQueue(final TableName table) { + return (TableRunQueue)fairq.get(table); + } + + /** + * Try to acquire the read lock on the specified table. + * other read operations in the table-queue may be executed concurrently, + * otherwise they have to wait until all the read-locks are released. + * @param table Table to lock + * @param purpose Human readable reason for locking the table + * @return true if we were able to acquire the lock on the table, otherwise false. + */ + public boolean tryAcquireTableRead(final TableName table, final String purpose) { + return getRunQueueOrCreate(table).tryRead(lockManager, table, purpose); + } + + /** + * Release the read lock taken with tryAcquireTableRead() + * @param table the name of the table that has the read lock + */ + public void releaseTableRead(final TableName table) { + getRunQueue(table).releaseRead(lockManager, table); + } + + /** + * Try to acquire the write lock on the specified table. + * other operations in the table-queue will be executed after the lock is released. + * @param table Table to lock + * @param purpose Human readable reason for locking the table + * @return true if we were able to acquire the lock on the table, otherwise false. + */ + public boolean tryAcquireTableWrite(final TableName table, final String purpose) { + return getRunQueueOrCreate(table).tryWrite(lockManager, table, purpose); + } + + /** + * Release the write lock taken with tryAcquireTableWrite() + * @param table the name of the table that has the write lock + */ + public void releaseTableWrite(final TableName table) { + getRunQueue(table).releaseWrite(lockManager, table); + } + + /** + * Tries to remove the queue and the table-lock of the specified table. + * If there are new operations pending (e.g. a new create), + * the remove will not be performed. + * @param table the name of the table that should be marked as deleted + * @return true if deletion succeeded, false otherwise meaning that there are + * other new operations pending for that table (e.g. a new create). + */ + protected boolean markTableAsDeleted(final TableName table) { + TableRunQueue queue = getRunQueue(table); + if (queue != null) { + lock.lock(); + try { + if (queue.isEmpty() && !queue.isLocked()) { + fairq.remove(table); + + // Remove the table lock + try { + lockManager.tableDeleted(table); + } catch (IOException e) { + LOG.warn("Received exception from TableLockManager.tableDeleted:", e); //not critical + } + } else { + // TODO: If there are no create, we can drop all the other ops + return false; + } + } finally { + lock.unlock(); + } + } + return true; + } + + private interface RunQueue extends ProcedureFairRunQueues.FairObject { + void addFront(Procedure proc); + void addBack(Procedure proc); + Long poll(); + boolean isLocked(); + } + + /** + * Run Queue for a Table. 
It contains a read-write lock that is used by the + * MasterProcedureQueue to decide if we should fetch an item from this queue + * or skip to another one which will be able to run without waiting for locks. + */ + private static class TableRunQueue implements RunQueue { + private final Deque runnables = new ArrayDeque(); + private final int priority; + + private TableLock tableLock = null; + private boolean wlock = false; + private int rlock = 0; + + public TableRunQueue(int priority) { + this.priority = priority; + } + + @Override + public void addFront(final Procedure proc) { + runnables.addFirst(proc.getProcId()); + } + + // TODO: Improve run-queue push with TableProcedureInterface.getType() + // we can take smart decisions based on the type of the operation (e.g. create/delete) + @Override + public void addBack(final Procedure proc) { + runnables.addLast(proc.getProcId()); + } + + @Override + public Long poll() { + return runnables.poll(); + } + + @Override + public boolean isAvailable() { + synchronized (this) { + return !wlock && !runnables.isEmpty(); + } + } + + public boolean isEmpty() { + return runnables.isEmpty(); + } + + @Override + public boolean isLocked() { + synchronized (this) { + return wlock || rlock > 0; + } + } + + public boolean tryRead(final TableLockManager lockManager, + final TableName tableName, final String purpose) { + synchronized (this) { + if (wlock) { + return false; + } + + // Take zk-read-lock + tableLock = lockManager.readLock(tableName, purpose); + try { + tableLock.acquire(); + } catch (IOException e) { + LOG.error("failed acquire read lock on " + tableName, e); + tableLock = null; + return false; + } + + rlock++; + } + return true; + } + + public void releaseRead(final TableLockManager lockManager, + final TableName tableName) { + synchronized (this) { + releaseTableLock(lockManager, rlock == 1); + rlock--; + } + } + + public boolean tryWrite(final TableLockManager lockManager, + final TableName tableName, final String purpose) { + synchronized (this) { + if (wlock || rlock > 0) { + return false; + } + + // Take zk-write-lock + tableLock = lockManager.writeLock(tableName, purpose); + try { + tableLock.acquire(); + } catch (IOException e) { + LOG.error("failed acquire write lock on " + tableName, e); + tableLock = null; + return false; + } + wlock = true; + } + return true; + } + + public void releaseWrite(final TableLockManager lockManager, + final TableName tableName) { + synchronized (this) { + releaseTableLock(lockManager, true); + wlock = false; + } + } + + private void releaseTableLock(final TableLockManager lockManager, boolean reset) { + for (int i = 0; i < 3; ++i) { + try { + tableLock.release(); + if (reset) { + tableLock = null; + } + break; + } catch (IOException e) { + LOG.warn("Could not release the table write-lock", e); + } + } + } + + @Override + public int getPriority() { + return priority; + } + + @Override + public String toString() { + return runnables.toString(); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java new file mode 100644 index 00000000000..d7c0b922fad --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation; +import org.apache.hadoop.security.UserGroupInformation; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class MasterProcedureUtil { + private static final Log LOG = LogFactory.getLog(MasterProcedureUtil.class); + + private MasterProcedureUtil() {} + + public static UserInformation toProtoUserInfo(UserGroupInformation ugi) { + UserInformation.Builder userInfoPB = UserInformation.newBuilder(); + userInfoPB.setEffectiveUser(ugi.getUserName()); + if (ugi.getRealUser() != null) { + userInfoPB.setRealUser(ugi.getRealUser().getUserName()); + } + return userInfoPB.build(); + } + + public static UserGroupInformation toUserInfo(UserInformation userInfoProto) { + if (userInfoProto.hasEffectiveUser()) { + String effectiveUser = userInfoProto.getEffectiveUser(); + if (userInfoProto.hasRealUser()) { + String realUser = userInfoProto.getRealUser(); + UserGroupInformation realUserUgi = UserGroupInformation.createRemoteUser(realUser); + return UserGroupInformation.createProxyUser(effectiveUser, realUserUgi); + } + return UserGroupInformation.createRemoteUser(effectiveUser); + } + return null; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java new file mode 100644 index 00000000000..5aa04db81e4 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyColumnFamilyProcedure.java @@ -0,0 +1,382 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.security.PrivilegedExceptionAction; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.executor.EventType; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyState; +import org.apache.hadoop.security.UserGroupInformation; + +/** + * The procedure to modify a column family from an existing table. + */ +@InterfaceAudience.Private +public class ModifyColumnFamilyProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(ModifyColumnFamilyProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + + private TableName tableName; + private HTableDescriptor unmodifiedHTableDescriptor; + private HColumnDescriptor cfDescriptor; + private UserGroupInformation user; + + private Boolean traceEnabled; + + public ModifyColumnFamilyProcedure() { + this.unmodifiedHTableDescriptor = null; + this.traceEnabled = null; + } + + public ModifyColumnFamilyProcedure( + final MasterProcedureEnv env, + final TableName tableName, + final HColumnDescriptor cfDescriptor) throws IOException { + this.tableName = tableName; + this.cfDescriptor = cfDescriptor; + this.user = env.getRequestUser().getUGI(); + this.unmodifiedHTableDescriptor = null; + this.traceEnabled = null; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, + final ModifyColumnFamilyState state) { + if (isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + + try { + switch (state) { + case MODIFY_COLUMN_FAMILY_PREPARE: + prepareModify(env); + setNextState(ModifyColumnFamilyState.MODIFY_COLUMN_FAMILY_PRE_OPERATION); + break; + case MODIFY_COLUMN_FAMILY_PRE_OPERATION: + preModify(env, state); + setNextState(ModifyColumnFamilyState.MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR); + break; + case MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR: + updateTableDescriptor(env); + setNextState(ModifyColumnFamilyState.MODIFY_COLUMN_FAMILY_POST_OPERATION); + break; + case MODIFY_COLUMN_FAMILY_POST_OPERATION: + postModify(env, state); + setNextState(ModifyColumnFamilyState.MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS); + break; + case MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS: + reOpenAllRegionsIfTableIsOnline(env); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + } catch (InterruptedException|IOException e) { + LOG.warn("Error trying to modify the column family " + getColumnFamilyName() + + " of the table " + tableName + "(in state=" + state + ")", e); + + setFailure("master-modify-columnfamily", e); + 
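+      // Note (assumption about the procedure-v2 framework, not spelled out in this patch):
+      // once setFailure() is called, the executor stops advancing this state machine and
+      // drives rollbackState() for the states that already ran.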
} + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final ModifyColumnFamilyState state) + throws IOException { + if (isTraceEnabled()) { + LOG.trace(this + " rollback state=" + state); + } + try { + switch (state) { + case MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS: + break; // Nothing to undo. + case MODIFY_COLUMN_FAMILY_POST_OPERATION: + // TODO-MAYBE: call the coprocessor event to undo? + break; + case MODIFY_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPTOR: + restoreTableDescriptor(env); + break; + case MODIFY_COLUMN_FAMILY_PRE_OPERATION: + // TODO-MAYBE: call the coprocessor event to undo? + break; + case MODIFY_COLUMN_FAMILY_PREPARE: + break; // nothing to do + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + } catch (IOException e) { + // This will be retried. Unless there is a bug in the code, + // this should be just a "temporary error" (e.g. network down) + LOG.warn("Failed rollback attempt step " + state + " for adding the column family" + + getColumnFamilyName() + " to the table " + tableName, e); + throw e; + } + } + + @Override + protected ModifyColumnFamilyState getState(final int stateId) { + return ModifyColumnFamilyState.valueOf(stateId); + } + + @Override + protected int getStateId(final ModifyColumnFamilyState state) { + return state.getNumber(); + } + + @Override + protected ModifyColumnFamilyState getInitialState() { + return ModifyColumnFamilyState.MODIFY_COLUMN_FAMILY_PREPARE; + } + + @Override + protected void setNextState(ModifyColumnFamilyState state) { + if (aborted.get()) { + setAbortFailure("modify-columnfamily", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (!env.isInitialized()) return false; + return env.getProcedureQueue().tryAcquireTableWrite( + tableName, + EventType.C_M_MODIFY_FAMILY.toString()); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableWrite(tableName); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + MasterProcedureProtos.ModifyColumnFamilyStateData.Builder modifyCFMsg = + MasterProcedureProtos.ModifyColumnFamilyStateData.newBuilder() + .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user)) + .setTableName(ProtobufUtil.toProtoTableName(tableName)) + .setColumnfamilySchema(cfDescriptor.convert()); + if (unmodifiedHTableDescriptor != null) { + modifyCFMsg.setUnmodifiedTableSchema(unmodifiedHTableDescriptor.convert()); + } + + modifyCFMsg.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + MasterProcedureProtos.ModifyColumnFamilyStateData modifyCFMsg = + MasterProcedureProtos.ModifyColumnFamilyStateData.parseDelimitedFrom(stream); + user = MasterProcedureUtil.toUserInfo(modifyCFMsg.getUserInfo()); + tableName = ProtobufUtil.toTableName(modifyCFMsg.getTableName()); + cfDescriptor = HColumnDescriptor.convert(modifyCFMsg.getColumnfamilySchema()); + if (modifyCFMsg.hasUnmodifiedTableSchema()) { + unmodifiedHTableDescriptor = HTableDescriptor.convert(modifyCFMsg.getUnmodifiedTableSchema()); + } + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + 
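+    // Builds the human-readable description used when logging and listing procedures.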
sb.append(getClass().getSimpleName()); + sb.append(" (table="); + sb.append(tableName); + sb.append(", columnfamily="); + if (cfDescriptor != null) { + sb.append(getColumnFamilyName()); + } else { + sb.append("Unknown"); + } + sb.append(") user="); + sb.append(user); + } + + @Override + public TableName getTableName() { + return tableName; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.EDIT; + } + + /** + * Action before any real action of modifying column family. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void prepareModify(final MasterProcedureEnv env) throws IOException { + // Checks whether the table is allowed to be modified. + MasterDDLOperationHelper.checkTableModifiable(env, tableName); + + unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName); + if (unmodifiedHTableDescriptor == null) { + throw new IOException("HTableDescriptor missing for " + tableName); + } + if (!unmodifiedHTableDescriptor.hasFamily(cfDescriptor.getName())) { + throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName() + + "' does not exist, so it cannot be modified"); + } + } + + /** + * Action before modifying column family. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void preModify(final MasterProcedureEnv env, final ModifyColumnFamilyState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * Modify the column family from the file system + */ + private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException { + // Update table descriptor + LOG.info("ModifyColumnFamily. Table = " + tableName + " HCD = " + cfDescriptor.toString()); + + HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName); + htd.modifyFamily(cfDescriptor); + env.getMasterServices().getTableDescriptors().add(htd); + } + + /** + * Restore back to the old descriptor + * @param env MasterProcedureEnv + * @throws IOException + **/ + private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException { + env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor); + + // Make sure regions are opened after table descriptor is updated. + reOpenAllRegionsIfTableIsOnline(env); + } + + /** + * Action after modifying column family. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void postModify(final MasterProcedureEnv env, final ModifyColumnFamilyState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * Last action from the procedure - executed when online schema change is supported. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException { + // This operation only run when the table is enabled. 
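+    // A disabled table picks up the new descriptor when its regions are reopened on
+    // enable, so there is nothing to do here in that case.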
+ if (!env.getMasterServices().getAssignmentManager().getTableStateManager() + .isTableState(getTableName(), TableState.State.ENABLED)) { + return; + } + + List regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, getTableName()); + if (MasterDDLOperationHelper.reOpenAllRegions(env, getTableName(), regionInfoList)) { + LOG.info("Completed add column family operation on table " + getTableName()); + } else { + LOG.warn("Error on reopening the regions on table " + getTableName()); + } + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @return traceEnabled + */ + private Boolean isTraceEnabled() { + if (traceEnabled == null) { + traceEnabled = LOG.isTraceEnabled(); + } + return traceEnabled; + } + + private String getColumnFamilyName() { + return cfDescriptor.getNameAsString(); + } + + /** + * Coprocessor Action. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void runCoprocessorAction(final MasterProcedureEnv env, + final ModifyColumnFamilyState state) throws IOException, InterruptedException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + switch (state) { + case MODIFY_COLUMN_FAMILY_PRE_OPERATION: + cpHost.preModifyColumnHandler(tableName, cfDescriptor); + break; + case MODIFY_COLUMN_FAMILY_POST_OPERATION: + cpHost.postModifyColumnHandler(tableName, cfDescriptor); + break; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + return null; + } + }); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java new file mode 100644 index 00000000000..a082ea48e7f --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java @@ -0,0 +1,510 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.security.PrivilegedExceptionAction; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotDisabledException; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.executor.EventType; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableState; +import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; +import org.apache.hadoop.security.UserGroupInformation; + +@InterfaceAudience.Private +public class ModifyTableProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(ModifyTableProcedure.class); + + private final AtomicBoolean aborted = new AtomicBoolean(false); + + private HTableDescriptor unmodifiedHTableDescriptor = null; + private HTableDescriptor modifiedHTableDescriptor; + private UserGroupInformation user; + private boolean deleteColumnFamilyInModify; + + private List regionInfoList; + private Boolean traceEnabled = null; + + public ModifyTableProcedure() { + initilize(); + } + + public ModifyTableProcedure( + final MasterProcedureEnv env, + final HTableDescriptor htd) throws IOException { + initilize(); + this.modifiedHTableDescriptor = htd; + this.user = env.getRequestUser().getUGI(); + } + + private void initilize() { + this.unmodifiedHTableDescriptor = null; + this.regionInfoList = null; + this.traceEnabled = null; + this.deleteColumnFamilyInModify = false; + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, final ModifyTableState state) { + if (isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + + try { + switch (state) { + case MODIFY_TABLE_PREPARE: + prepareModify(env); + setNextState(ModifyTableState.MODIFY_TABLE_PRE_OPERATION); + break; + case MODIFY_TABLE_PRE_OPERATION: + preModify(env, state); + setNextState(ModifyTableState.MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR); + break; + case MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR: + updateTableDescriptor(env); + setNextState(ModifyTableState.MODIFY_TABLE_REMOVE_REPLICA_COLUMN); + break; + case MODIFY_TABLE_REMOVE_REPLICA_COLUMN: + updateReplicaColumnsIfNeeded(env, unmodifiedHTableDescriptor, modifiedHTableDescriptor); + if (deleteColumnFamilyInModify) { + setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT); + } else { + setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION); + } + break; + case 
MODIFY_TABLE_DELETE_FS_LAYOUT: + deleteFromFs(env, unmodifiedHTableDescriptor, modifiedHTableDescriptor); + setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION); + break; + case MODIFY_TABLE_POST_OPERATION: + postModify(env, state); + setNextState(ModifyTableState.MODIFY_TABLE_REOPEN_ALL_REGIONS); + break; + case MODIFY_TABLE_REOPEN_ALL_REGIONS: + reOpenAllRegionsIfTableIsOnline(env); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (InterruptedException|IOException e) { + if (!isRollbackSupported(state)) { + // We reach a state that cannot be rolled back. We just need to keep retry. + LOG.warn("Error trying to modify table=" + getTableName() + " state=" + state, e); + } else { + LOG.error("Error trying to modify table=" + getTableName() + " state=" + state, e); + setFailure("master-modify-table", e); + } + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final ModifyTableState state) + throws IOException { + if (isTraceEnabled()) { + LOG.trace(this + " rollback state=" + state); + } + try { + switch (state) { + case MODIFY_TABLE_REOPEN_ALL_REGIONS: + break; // Nothing to undo. + case MODIFY_TABLE_POST_OPERATION: + // TODO-MAYBE: call the coprocessor event to un-modify? + break; + case MODIFY_TABLE_DELETE_FS_LAYOUT: + // Once we reach to this state - we could NOT rollback - as it is tricky to undelete + // the deleted files. We are not suppose to reach here, throw exception so that we know + // there is a code bug to investigate. + assert deleteColumnFamilyInModify; + throw new UnsupportedOperationException(this + " rollback of state=" + state + + " is unsupported."); + case MODIFY_TABLE_REMOVE_REPLICA_COLUMN: + // Undo the replica column update. + updateReplicaColumnsIfNeeded(env, modifiedHTableDescriptor, unmodifiedHTableDescriptor); + break; + case MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR: + restoreTableDescriptor(env); + break; + case MODIFY_TABLE_PRE_OPERATION: + // TODO-MAYBE: call the coprocessor event to un-modify? + break; + case MODIFY_TABLE_PREPARE: + break; // Nothing to undo. 
+ default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (IOException e) { + LOG.warn("Fail trying to rollback modify table=" + getTableName() + " state=" + state, e); + throw e; + } + } + + @Override + protected ModifyTableState getState(final int stateId) { + return ModifyTableState.valueOf(stateId); + } + + @Override + protected int getStateId(final ModifyTableState state) { + return state.getNumber(); + } + + @Override + protected ModifyTableState getInitialState() { + return ModifyTableState.MODIFY_TABLE_PREPARE; + } + + @Override + protected void setNextState(final ModifyTableState state) { + if (aborted.get() && isRollbackSupported(state)) { + setAbortFailure("modify-table", "abort requested"); + } else { + super.setNextState(state); + } + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + aborted.set(true); + return true; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (!env.isInitialized()) return false; + return env.getProcedureQueue().tryAcquireTableWrite( + getTableName(), + EventType.C_M_MODIFY_TABLE.toString()); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableWrite(getTableName()); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + MasterProcedureProtos.ModifyTableStateData.Builder modifyTableMsg = + MasterProcedureProtos.ModifyTableStateData.newBuilder() + .setUserInfo(MasterProcedureUtil.toProtoUserInfo(user)) + .setModifiedTableSchema(modifiedHTableDescriptor.convert()) + .setDeleteColumnFamilyInModify(deleteColumnFamilyInModify); + + if (unmodifiedHTableDescriptor != null) { + modifyTableMsg.setUnmodifiedTableSchema(unmodifiedHTableDescriptor.convert()); + } + + modifyTableMsg.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + MasterProcedureProtos.ModifyTableStateData modifyTableMsg = + MasterProcedureProtos.ModifyTableStateData.parseDelimitedFrom(stream); + user = MasterProcedureUtil.toUserInfo(modifyTableMsg.getUserInfo()); + modifiedHTableDescriptor = HTableDescriptor.convert(modifyTableMsg.getModifiedTableSchema()); + deleteColumnFamilyInModify = modifyTableMsg.getDeleteColumnFamilyInModify(); + + if (modifyTableMsg.hasUnmodifiedTableSchema()) { + unmodifiedHTableDescriptor = + HTableDescriptor.convert(modifyTableMsg.getUnmodifiedTableSchema()); + } + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (table="); + sb.append(getTableName()); + sb.append(") user="); + sb.append(user); + } + + @Override + public TableName getTableName() { + return modifiedHTableDescriptor.getTableName(); + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.EDIT; + } + + /** + * Check conditions before any real action of modifying a table. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void prepareModify(final MasterProcedureEnv env) throws IOException { + // Checks whether the table exists + if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), getTableName())) { + throw new TableNotFoundException(getTableName()); + } + + // In order to update the descriptor, we need to retrieve the old descriptor for comparison. 
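+    // The old descriptor is also what restoreTableDescriptor() reinstates if this
+    // procedure has to be rolled back.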
+ this.unmodifiedHTableDescriptor = + env.getMasterServices().getTableDescriptors().get(getTableName()); + + if (env.getMasterServices().getAssignmentManager().getTableStateManager() + .isTableState(getTableName(), TableState.State.ENABLED)) { + // We only execute this procedure with table online if online schema change config is set. + if (!MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) { + throw new TableNotDisabledException(getTableName()); + } + + if (modifiedHTableDescriptor.getRegionReplication() != unmodifiedHTableDescriptor + .getRegionReplication()) { + throw new IOException("REGION_REPLICATION change is not supported for enabled tables"); + } + } + + // Find out whether all column families in unmodifiedHTableDescriptor also exists in + // the modifiedHTableDescriptor. This is to determine whether we are safe to rollback. + final Set oldFamilies = unmodifiedHTableDescriptor.getFamiliesKeys(); + final Set newFamilies = modifiedHTableDescriptor.getFamiliesKeys(); + for (byte[] familyName : oldFamilies) { + if (!newFamilies.contains(familyName)) { + this.deleteColumnFamilyInModify = true; + break; + } + } + } + + /** + * Action before modifying table. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void preModify(final MasterProcedureEnv env, final ModifyTableState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * Update descriptor + * @param env MasterProcedureEnv + * @throws IOException + **/ + private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException { + env.getMasterServices().getTableDescriptors().add(modifiedHTableDescriptor); + } + + /** + * Undo the descriptor change (for rollback) + * @param env MasterProcedureEnv + * @throws IOException + **/ + private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException { + env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor); + + // delete any new column families from the modifiedHTableDescriptor. + deleteFromFs(env, modifiedHTableDescriptor, unmodifiedHTableDescriptor); + + // Make sure regions are opened after table descriptor is updated. + reOpenAllRegionsIfTableIsOnline(env); + } + + /** + * Removes from hdfs the families that are not longer present in the new table descriptor. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void deleteFromFs(final MasterProcedureEnv env, + final HTableDescriptor oldHTableDescriptor, final HTableDescriptor newHTableDescriptor) + throws IOException { + final Set oldFamilies = oldHTableDescriptor.getFamiliesKeys(); + final Set newFamilies = newHTableDescriptor.getFamiliesKeys(); + for (byte[] familyName : oldFamilies) { + if (!newFamilies.contains(familyName)) { + MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem( + env, + getTableName(), + getRegionInfoList(env), + familyName); + } + } + } + + /** + * update replica column families if necessary. 
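+   * When the region replication count is lowered, the rows of the removed replicas are
+   * deleted from hbase:meta; when it is raised above one, region replica replication is
+   * set up on the cluster.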
+ * @param env MasterProcedureEnv + * @throws IOException + */ + private void updateReplicaColumnsIfNeeded( + final MasterProcedureEnv env, + final HTableDescriptor oldHTableDescriptor, + final HTableDescriptor newHTableDescriptor) throws IOException { + final int oldReplicaCount = oldHTableDescriptor.getRegionReplication(); + final int newReplicaCount = newHTableDescriptor.getRegionReplication(); + + if (newReplicaCount < oldReplicaCount) { + Set tableRows = new HashSet(); + Connection connection = env.getMasterServices().getConnection(); + Scan scan = MetaTableAccessor.getScanForTableName(connection, getTableName()); + scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + + try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) { + ResultScanner resScanner = metaTable.getScanner(scan); + for (Result result : resScanner) { + tableRows.add(result.getRow()); + } + MetaTableAccessor.removeRegionReplicasFromMeta( + tableRows, + newReplicaCount, + oldReplicaCount - newReplicaCount, + connection); + } + } + + // Setup replication for region replicas if needed + if (newReplicaCount > 1 && oldReplicaCount <= 1) { + ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration()); + } + } + + /** + * Action after modifying table. + * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void postModify(final MasterProcedureEnv env, final ModifyTableState state) + throws IOException, InterruptedException { + runCoprocessorAction(env, state); + } + + /** + * Last action from the procedure - executed when online schema change is supported. + * @param env MasterProcedureEnv + * @throws IOException + */ + private void reOpenAllRegionsIfTableIsOnline(final MasterProcedureEnv env) throws IOException { + // This operation only run when the table is enabled. + if (!env.getMasterServices().getAssignmentManager().getTableStateManager() + .isTableState(getTableName(), TableState.State.ENABLED)) { + return; + } + + if (MasterDDLOperationHelper.reOpenAllRegions(env, getTableName(), getRegionInfoList(env))) { + LOG.info("Completed modify table operation on table " + getTableName()); + } else { + LOG.warn("Error on reopening the regions on table " + getTableName()); + } + } + + /** + * The procedure could be restarted from a different machine. If the variable is null, we need to + * retrieve it. + * @return traceEnabled whether the trace is enabled + */ + private Boolean isTraceEnabled() { + if (traceEnabled == null) { + traceEnabled = LOG.isTraceEnabled(); + } + return traceEnabled; + } + + /** + * Coprocessor Action. 
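+   * Runs the pre/post modify-table coprocessor hooks as the user that submitted the
+   * procedure.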
+ * @param env MasterProcedureEnv + * @param state the procedure state + * @throws IOException + * @throws InterruptedException + */ + private void runCoprocessorAction(final MasterProcedureEnv env, final ModifyTableState state) + throws IOException, InterruptedException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + user.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + switch (state) { + case MODIFY_TABLE_PRE_OPERATION: + cpHost.preModifyTableHandler(getTableName(), modifiedHTableDescriptor); + break; + case MODIFY_TABLE_POST_OPERATION: + cpHost.postModifyTableHandler(getTableName(), modifiedHTableDescriptor); + break; + default: + throw new UnsupportedOperationException(this + " unhandled state=" + state); + } + return null; + } + }); + } + } + + /* + * Check whether we are in the state that can be rollback + */ + private boolean isRollbackSupported(final ModifyTableState state) { + if (deleteColumnFamilyInModify) { + switch (state) { + case MODIFY_TABLE_DELETE_FS_LAYOUT: + case MODIFY_TABLE_POST_OPERATION: + case MODIFY_TABLE_REOPEN_ALL_REGIONS: + // It is not safe to rollback if we reach to these states. + return false; + default: + break; + } + } + return true; + } + + private List getRegionInfoList(final MasterProcedureEnv env) throws IOException { + if (regionInfoList == null) { + regionInfoList = ProcedureSyncWait.getRegionsFromMeta(env, getTableName()); + } + return regionInfoList; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java new file mode 100644 index 00000000000..2a1abca134c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java @@ -0,0 +1,105 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.concurrent.CountDownLatch; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.ipc.RpcCallContext; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo; + +/** + * Latch used by the Master to have the prepare() sync behaviour for old + * clients, that can only get exceptions in a synchronous way. 
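+ * <p>A rough sketch of the intended call pattern (the latch-accepting procedure
+ * constructor shown here is hypothetical; the procedures in this patch do not take
+ * a latch yet):
+ * <pre>
+ *   ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
+ *   procExec.submitProcedure(new SomeTableProcedure(env, tableName, latch));
+ *   latch.await(); // blocks until prepare() finishes, rethrowing its exception if any
+ * </pre>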
+ */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public abstract class ProcedurePrepareLatch { + private static final NoopLatch noopLatch = new NoopLatch(); + + public static ProcedurePrepareLatch createLatch() { + // don't use the latch if we have procedure support + return hasProcedureSupport() ? noopLatch : new CompatibilityLatch(); + } + + public static boolean hasProcedureSupport() { + return currentClientHasMinimumVersion(1, 1); + } + + private static boolean currentClientHasMinimumVersion(int major, int minor) { + RpcCallContext call = RpcServer.getCurrentCall(); + VersionInfo versionInfo = call != null ? call.getClientVersionInfo() : null; + if (versionInfo != null) { + String[] components = versionInfo.getVersion().split("\\."); + + int clientMajor = components.length > 0 ? Integer.parseInt(components[0]) : 0; + if (clientMajor != major) { + return clientMajor > major; + } + + int clientMinor = components.length > 1 ? Integer.parseInt(components[1]) : 0; + return clientMinor >= minor; + } + return false; + } + + protected abstract void countDown(final Procedure proc); + public abstract void await() throws IOException; + + protected static void releaseLatch(final ProcedurePrepareLatch latch, final Procedure proc) { + if (latch != null) { + latch.countDown(proc); + } + } + + private static class NoopLatch extends ProcedurePrepareLatch { + protected void countDown(final Procedure proc) {} + public void await() throws IOException {} + } + + protected static class CompatibilityLatch extends ProcedurePrepareLatch { + private final CountDownLatch latch = new CountDownLatch(1); + + private IOException exception = null; + + protected void countDown(final Procedure proc) { + if (proc.hasException()) { + exception = proc.getException().unwrapRemoteException(); + } + latch.countDown(); + } + + public void await() throws IOException { + try { + latch.await(); + } catch (InterruptedException e) { + throw (InterruptedIOException)new InterruptedIOException().initCause(e); + } + + if (exception != null) { + throw exception; + } + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java new file mode 100644 index 00000000000..903dbd3591e --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java @@ -0,0 +1,179 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CoordinatedStateException; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.exceptions.TimeoutIOException; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.RegionStates; +import org.apache.hadoop.hbase.master.RegionState.State; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureResult; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; + +/** + * Helper to synchronously wait on conditions. + * This will be removed in the future (mainly when the AssignmentManager will be + * replaced with a Procedure version) by using ProcedureYieldException, + * and the queue will handle waiting and scheduling based on events. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class ProcedureSyncWait { + private static final Log LOG = LogFactory.getLog(ProcedureSyncWait.class); + + private ProcedureSyncWait() {} + + @InterfaceAudience.Private + public interface Predicate { + T evaluate() throws IOException; + } + + public static byte[] submitAndWaitProcedure(ProcedureExecutor procExec, + final Procedure proc) throws IOException { + long procId = procExec.submitProcedure(proc); + return waitForProcedureToComplete(procExec, procId); + } + + public static byte[] waitForProcedureToComplete(ProcedureExecutor procExec, + final long procId) throws IOException { + while (!procExec.isFinished(procId) && procExec.isRunning()) { + // TODO: add a config to make it tunable + // Dev Consideration: are we waiting forever, or we can set up some timeout value? + Threads.sleepWithoutInterrupt(250); + } + ProcedureResult result = procExec.getResult(procId); + if (result != null) { + if (result.isFailed()) { + // If the procedure fails, we should always have an exception captured. Throw it. 
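+        // unwrapRemoteException() rethrows the original cause (e.g. TableNotFoundException)
+        // so synchronous callers see the original failure rather than the wrapper.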
+ throw result.getException().unwrapRemoteException(); + } + return result.getResult(); + } else { + if (procExec.isRunning()) { + throw new IOException("Procedure " + procId + "not found"); + } else { + throw new IOException("The Master is Aborting"); + } + } + } + + public static T waitFor(MasterProcedureEnv env, String purpose, Predicate predicate) + throws IOException { + final Configuration conf = env.getMasterConfiguration(); + final long waitTime = conf.getLong("hbase.master.wait.on.region", 5 * 60 * 1000); + final long waitingTimeForEvents = conf.getInt("hbase.master.event.waiting.time", 1000); + return waitFor(env, waitTime, waitingTimeForEvents, purpose, predicate); + } + + public static T waitFor(MasterProcedureEnv env, long waitTime, long waitingTimeForEvents, + String purpose, Predicate predicate) throws IOException { + final long done = EnvironmentEdgeManager.currentTime() + waitTime; + do { + T result = predicate.evaluate(); + if (result != null && !result.equals(Boolean.FALSE)) { + return result; + } + try { + Thread.sleep(waitingTimeForEvents); + } catch (InterruptedException e) { + LOG.warn("Interrupted while sleeping, waiting on " + purpose); + throw (InterruptedIOException)new InterruptedIOException().initCause(e); + } + LOG.debug("Waiting on " + purpose); + } while (EnvironmentEdgeManager.currentTime() < done && env.isRunning()); + + throw new TimeoutIOException("Timed out while waiting on " + purpose); + } + + protected static void waitMetaRegions(final MasterProcedureEnv env) throws IOException { + int timeout = env.getMasterConfiguration().getInt("hbase.client.catalog.timeout", 10000); + try { + if (env.getMasterServices().getMetaTableLocator().waitMetaRegionLocation( + env.getMasterServices().getZooKeeper(), timeout) == null) { + throw new NotAllMetaRegionsOnlineException(); + } + } catch (InterruptedException e) { + throw (InterruptedIOException)new InterruptedIOException().initCause(e); + } + } + + protected static void waitRegionServers(final MasterProcedureEnv env) throws IOException { + final ServerManager sm = env.getMasterServices().getServerManager(); + ProcedureSyncWait.waitFor(env, "server to assign region(s)", + new ProcedureSyncWait.Predicate() { + @Override + public Boolean evaluate() throws IOException { + List servers = sm.createDestinationServersList(); + return servers != null && !servers.isEmpty(); + } + }); + } + + protected static List getRegionsFromMeta(final MasterProcedureEnv env, + final TableName tableName) throws IOException { + return ProcedureSyncWait.waitFor(env, "regions of table=" + tableName + " from meta", + new ProcedureSyncWait.Predicate>() { + @Override + public List evaluate() throws IOException { + if (TableName.META_TABLE_NAME.equals(tableName)) { + return new MetaTableLocator().getMetaRegions(env.getMasterServices().getZooKeeper()); + } + return MetaTableAccessor.getTableRegions(env.getMasterServices().getConnection(),tableName); + } + }); + } + + protected static void waitRegionInTransition(final MasterProcedureEnv env, + final List regions) throws IOException, CoordinatedStateException { + final AssignmentManager am = env.getMasterServices().getAssignmentManager(); + final RegionStates states = am.getRegionStates(); + for (final HRegionInfo region : regions) { + ProcedureSyncWait.waitFor(env, "regions " + region.getRegionNameAsString() + " in transition", + new ProcedureSyncWait.Predicate() { + @Override + public Boolean evaluate() throws IOException { + if (states.isRegionInState(region, State.FAILED_OPEN)) { + 
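+            // A region stuck in FAILED_OPEN never leaves the transition map on its own;
+            // offline it so this wait can make progress.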
am.regionOffline(region); + } + return !states.isRegionInTransition(region); + } + }); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java new file mode 100644 index 00000000000..6928d026d41 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Procedures that operates on a specific Table (e.g. create, delete, snapshot, ...) + * must implement this interface to allow the system handle the lock/concurrency problems. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface TableProcedureInterface { + public enum TableOperationType { + CREATE, DELETE, DISABLE, EDIT, ENABLE, READ, + }; + + /** + * @return the name of the table the procedure is operating on + */ + TableName getTableName(); + + /** + * Given an operation type we can take decisions about what to do with pending operations. + * e.g. if we get a delete and we have some table operation pending (e.g. add column) + * we can abort those operations. + * @return the operation type that the procedure is executing. + */ + TableOperationType getTableOperationType(); +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java new file mode 100644 index 00000000000..5ef0a19905b --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java @@ -0,0 +1,291 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.InputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotDisabledException; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.exceptions.HBaseException; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableState; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.procedure2.StateMachineProcedure; +import org.apache.hadoop.hbase.util.ModifyRegionUtils; +import org.apache.hadoop.security.UserGroupInformation; + +@InterfaceAudience.Private +public class TruncateTableProcedure + extends StateMachineProcedure + implements TableProcedureInterface { + private static final Log LOG = LogFactory.getLog(TruncateTableProcedure.class); + + private boolean preserveSplits; + private List regions; + private UserGroupInformation user; + private HTableDescriptor hTableDescriptor; + private TableName tableName; + + public TruncateTableProcedure() { + // Required by the Procedure framework to create the procedure on replay + } + + public TruncateTableProcedure(final MasterProcedureEnv env, final TableName tableName, + boolean preserveSplits) throws IOException { + this.tableName = tableName; + this.preserveSplits = preserveSplits; + this.user = env.getRequestUser().getUGI(); + } + + @Override + protected Flow executeFromState(final MasterProcedureEnv env, TruncateTableState state) { + if (LOG.isTraceEnabled()) { + LOG.trace(this + " execute state=" + state); + } + try { + switch (state) { + case TRUNCATE_TABLE_PRE_OPERATION: + // Verify if we can truncate the table + if (!prepareTruncate(env)) { + assert isFailed() : "the truncate should have an exception here"; + return Flow.NO_MORE_STATE; + } + + // TODO: Move out... 
in the acquireLock() + LOG.debug("waiting for '" + getTableName() + "' regions in transition"); + regions = ProcedureSyncWait.getRegionsFromMeta(env, getTableName()); + assert regions != null && !regions.isEmpty() : "unexpected 0 regions"; + ProcedureSyncWait.waitRegionInTransition(env, regions); + + // Call coprocessors + preTruncate(env); + + setNextState(TruncateTableState.TRUNCATE_TABLE_REMOVE_FROM_META); + break; + case TRUNCATE_TABLE_REMOVE_FROM_META: + hTableDescriptor = env.getMasterServices().getTableDescriptors() + .getDescriptor(tableName).getHTableDescriptor(); + DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions); + DeleteTableProcedure.deleteAssignmentState(env, getTableName()); + setNextState(TruncateTableState.TRUNCATE_TABLE_CLEAR_FS_LAYOUT); + break; + case TRUNCATE_TABLE_CLEAR_FS_LAYOUT: + DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true); + if (!preserveSplits) { + // if we are not preserving splits, generate a new single region + regions = Arrays.asList(ModifyRegionUtils.createHRegionInfos(hTableDescriptor, null)); + } + setNextState(TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT); + break; + case TRUNCATE_TABLE_CREATE_FS_LAYOUT: + regions = CreateTableProcedure.createFsLayout(env, hTableDescriptor, regions); + CreateTableProcedure.updateTableDescCache(env, getTableName()); + setNextState(TruncateTableState.TRUNCATE_TABLE_ADD_TO_META); + break; + case TRUNCATE_TABLE_ADD_TO_META: + regions = CreateTableProcedure.addTableToMeta(env, hTableDescriptor, regions); + setNextState(TruncateTableState.TRUNCATE_TABLE_ASSIGN_REGIONS); + break; + case TRUNCATE_TABLE_ASSIGN_REGIONS: + CreateTableProcedure.assignRegions(env, getTableName(), regions); + setNextState(TruncateTableState.TRUNCATE_TABLE_POST_OPERATION); + hTableDescriptor = null; + regions = null; + break; + case TRUNCATE_TABLE_POST_OPERATION: + postTruncate(env); + LOG.debug("truncate '" + getTableName() + "' completed"); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException("unhandled state=" + state); + } + } catch (HBaseException|IOException e) { + LOG.warn("Retriable error trying to truncate table=" + getTableName() + " state=" + state, e); + } catch (InterruptedException e) { + // if the interrupt is real, the executor will be stopped. + LOG.warn("Interrupted trying to truncate table=" + getTableName() + " state=" + state, e); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final TruncateTableState state) { + if (state == TruncateTableState.TRUNCATE_TABLE_PRE_OPERATION) { + // nothing to rollback, pre-truncate is just table-state checks. + // We can fail if the table does not exist or is not disabled. + return; + } + + // The truncate doesn't have a rollback. The execution will succeed, at some point. 
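+    // Every state after PRE_OPERATION keeps retrying in executeFromState() instead of
+    // failing, so a rollback request for those states indicates a bug.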
+ throw new UnsupportedOperationException("unhandled state=" + state); + } + + @Override + protected TruncateTableState getState(final int stateId) { + return TruncateTableState.valueOf(stateId); + } + + @Override + protected int getStateId(final TruncateTableState state) { + return state.getNumber(); + } + + @Override + protected TruncateTableState getInitialState() { + return TruncateTableState.TRUNCATE_TABLE_PRE_OPERATION; + } + + @Override + public TableName getTableName() { + return tableName; + } + + @Override + public TableOperationType getTableOperationType() { + return TableOperationType.EDIT; + } + + @Override + public boolean abort(final MasterProcedureEnv env) { + // TODO: We may be able to abort if the procedure is not started yet. + return false; + } + + @Override + protected boolean acquireLock(final MasterProcedureEnv env) { + if (!env.isInitialized()) return false; + return env.getProcedureQueue().tryAcquireTableWrite(getTableName(), "truncate table"); + } + + @Override + protected void releaseLock(final MasterProcedureEnv env) { + env.getProcedureQueue().releaseTableWrite(getTableName()); + } + + @Override + public void toStringClassDetails(StringBuilder sb) { + sb.append(getClass().getSimpleName()); + sb.append(" (table="); + sb.append(getTableName()); + sb.append(" preserveSplits="); + sb.append(preserveSplits); + sb.append(") user="); + sb.append(user); + } + + @Override + public void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + + MasterProcedureProtos.TruncateTableStateData.Builder state = + MasterProcedureProtos.TruncateTableStateData.newBuilder() + .setUserInfo(MasterProcedureUtil.toProtoUserInfo(this.user)) + .setPreserveSplits(preserveSplits); + if (hTableDescriptor != null) { + state.setTableSchema(hTableDescriptor.convert()); + } else { + state.setTableName(ProtobufUtil.toProtoTableName(tableName)); + } + if (regions != null) { + for (HRegionInfo hri: regions) { + state.addRegionInfo(HRegionInfo.convert(hri)); + } + } + state.build().writeDelimitedTo(stream); + } + + @Override + public void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + + MasterProcedureProtos.TruncateTableStateData state = + MasterProcedureProtos.TruncateTableStateData.parseDelimitedFrom(stream); + user = MasterProcedureUtil.toUserInfo(state.getUserInfo()); + if (state.hasTableSchema()) { + hTableDescriptor = HTableDescriptor.convert(state.getTableSchema()); + tableName = hTableDescriptor.getTableName(); + } else { + tableName = ProtobufUtil.toTableName(state.getTableName()); + } + preserveSplits = state.getPreserveSplits(); + if (state.getRegionInfoCount() == 0) { + regions = null; + } else { + regions = new ArrayList(state.getRegionInfoCount()); + for (HBaseProtos.RegionInfo hri: state.getRegionInfoList()) { + regions.add(HRegionInfo.convert(hri)); + } + } + } + + private boolean prepareTruncate(final MasterProcedureEnv env) throws IOException { + try { + env.getMasterServices().checkTableModifiable(getTableName()); + } catch (TableNotFoundException|TableNotDisabledException e) { + setFailure("master-truncate-table", e); + return false; + } + return true; + } + + private boolean preTruncate(final MasterProcedureEnv env) + throws IOException, InterruptedException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + final TableName tableName = getTableName(); + user.doAs(new PrivilegedExceptionAction() { + @Override + public Void 
run() throws Exception { + cpHost.preTruncateTableHandler(tableName); + return null; + } + }); + } + return true; + } + + private void postTruncate(final MasterProcedureEnv env) + throws IOException, InterruptedException { + final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost(); + if (cpHost != null) { + final TableName tableName = getTableName(); + user.doAs(new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + cpHost.postTruncateTableHandler(tableName); + return null; + } + }); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java index b7a891d01ba..177ced2ad49 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java @@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.executor.ExecutorService; -import org.apache.hadoop.hbase.ipc.RequestContext; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterFileSystem; @@ -567,7 +567,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable if (!snapshot.hasVersion()) { builder.setVersion(SnapshotDescriptionUtils.SNAPSHOT_LAYOUT_VERSION); } - User user = RequestContext.getRequestUser(); + User user = RpcServer.getRequestUser(); if (User.isHBaseSecurityEnabled(master.getConfiguration()) && user != null) { builder.setOwner(user.getShortName()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobCompactor.java index c2abc7c4d1d..d54dca4fd49 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobCompactor.java @@ -34,15 +34,8 @@ import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.regionserver.HMobStore; -import org.apache.hadoop.hbase.regionserver.HStore; -import org.apache.hadoop.hbase.regionserver.InternalScanner; -import org.apache.hadoop.hbase.regionserver.MobCompactionStoreScanner; -import org.apache.hadoop.hbase.regionserver.ScanType; -import org.apache.hadoop.hbase.regionserver.Store; -import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.*; import org.apache.hadoop.hbase.regionserver.StoreFile.Writer; -import org.apache.hadoop.hbase.regionserver.StoreFileScanner; import org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputController; import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor; import org.apache.hadoop.hbase.util.Bytes; @@ -185,8 +178,12 @@ public class DefaultMobCompactor extends DefaultCompactor { } delFileWriter = mobStore.createDelFileWriterInTmp(new Date(fd.latestPutTs), fd.maxKeyCount, store.getFamily().getCompression(), store.getRegionInfo().getStartKey()); + ScannerContext scannerContext = + 
ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); + + do { - hasMore = compactionScanner.next(cells, compactionKVMax); + hasMore = compactionScanner.next(cells, scannerContext); // output to writer: for (Cell c : cells) { if (cleanSeqId && c.getSequenceId() <= smallestReadPoint) { @@ -212,7 +209,6 @@ public class DefaultMobCompactor extends DefaultCompactor { Cell mobCell = mobStore.resolve(c, false); if (mobCell.getValueLength() != 0) { // put the mob data back to the store file - // KeyValue mobKv = KeyValueUtil.ensureKeyValue(cell); CellUtil.setSequenceId(mobCell, c.getSequenceId()); writer.append(mobCell); mobCompactedFromMobCellsCount++; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java index 00b3421b057..44387f5a8a2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java @@ -35,13 +35,9 @@ import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.monitoring.MonitoredTask; -import org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher; -import org.apache.hadoop.hbase.regionserver.HMobStore; -import org.apache.hadoop.hbase.regionserver.InternalScanner; -import org.apache.hadoop.hbase.regionserver.MemStoreSnapshot; -import org.apache.hadoop.hbase.regionserver.Store; -import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.*; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.util.StringUtils; /** * An implementation of the StoreFlusher. It extends the DefaultStoreFlusher. 
@@ -127,8 +123,9 @@ public class DefaultMobStoreFlusher extends DefaultStoreFlusher { scanner.close(); } LOG.info("Flushed, sequenceid=" + cacheFlushId + ", memsize=" - + snapshot.getSize() + ", hasBloomFilter=" + writer.hasGeneralBloom() - + ", into tmp file " + writer.getPath()); + + StringUtils.TraditionalBinaryPrefix.long2String(snapshot.getSize(), "", 1) + + ", hasBloomFilter=" + writer.hasGeneralBloom() + + ", into tmp file " + writer.getPath()); result.add(writer.getPath()); return result; } @@ -168,8 +165,11 @@ public class DefaultMobStoreFlusher extends DefaultStoreFlusher { .getName()); List cells = new ArrayList(); boolean hasMore; + ScannerContext scannerContext = + ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); + do { - hasMore = scanner.next(cells, compactionKVMax); + hasMore = scanner.next(cells, scannerContext); if (!cells.isEmpty()) { for (Cell c : cells) { // If we know that this KV is going to be included always, then let us diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/filecompactions/PartitionedMobFileCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/filecompactions/PartitionedMobFileCompactor.java index 0778ac1ab4d..718b5133028 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/filecompactions/PartitionedMobFileCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/filecompactions/PartitionedMobFileCompactor.java @@ -47,8 +47,7 @@ import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.*; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles; @@ -58,15 +57,8 @@ import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.mob.filecompactions.MobFileCompactionRequest.CompactionType; import org.apache.hadoop.hbase.mob.filecompactions.PartitionedMobFileCompactionRequest.CompactionPartition; import org.apache.hadoop.hbase.mob.filecompactions.PartitionedMobFileCompactionRequest.CompactionPartitionId; -import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HStore; -import org.apache.hadoop.hbase.regionserver.ScanInfo; -import org.apache.hadoop.hbase.regionserver.ScanType; -import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.*; import org.apache.hadoop.hbase.regionserver.StoreFile.Writer; -import org.apache.hadoop.hbase.regionserver.StoreFileInfo; -import org.apache.hadoop.hbase.regionserver.StoreFileScanner; -import org.apache.hadoop.hbase.regionserver.StoreScanner; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; @@ -240,7 +232,8 @@ public class PartitionedMobFileCompactor extends MobFileCompactor { return Collections.emptyList(); } List paths = new ArrayList(); - final HTable table = new HTable(conf, tableName); + Connection c = ConnectionFactory.createConnection(conf); + final Table table = c.getTable(tableName); try { Map>> results = new HashMap>>(); @@ -289,7 +282,7 @@ public class PartitionedMobFileCompactor extends MobFileCompactor { * @throws IOException */ private List compactMobFilePartition(PartitionedMobFileCompactionRequest request, - CompactionPartition partition, List delFiles, 
HTable table) throws IOException { + CompactionPartition partition, List delFiles, Table table) throws IOException { List newFiles = new ArrayList(); List files = partition.listFiles(); int offset = 0; @@ -343,7 +336,7 @@ public class PartitionedMobFileCompactor extends MobFileCompactor { * @throws IOException */ private void compactMobFilesInBatch(PartitionedMobFileCompactionRequest request, - CompactionPartition partition, HTable table, List filesToCompact, int batch, + CompactionPartition partition, Table table, List filesToCompact, int batch, Path bulkloadPathOfPartition, Path bulkloadColumnPath, List newFiles) throws IOException { // open scanner to the selected mob files and del files. @@ -370,8 +363,10 @@ public class PartitionedMobFileCompactor extends MobFileCompactor { refFilePath = refFileWriter.getPath(); List cells = new ArrayList(); boolean hasMore = false; + ScannerContext scannerContext = + ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); do { - hasMore = scanner.next(cells, compactionKVMax); + hasMore = scanner.next(cells, scannerContext); for (Cell cell : cells) { // TODO remove this after the new code are introduced. KeyValue kv = KeyValueUtil.ensureKeyValue(cell); @@ -475,8 +470,10 @@ public class PartitionedMobFileCompactor extends MobFileCompactor { filePath = writer.getPath(); List cells = new ArrayList(); boolean hasMore = false; + ScannerContext scannerContext = + ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); do { - hasMore = scanner.next(cells, compactionKVMax); + hasMore = scanner.next(cells, scannerContext); for (Cell cell : cells) { // TODO remove this after the new code are introduced. KeyValue kv = KeyValueUtil.ensureKeyValue(cell); @@ -532,12 +529,12 @@ public class PartitionedMobFileCompactor extends MobFileCompactor { * @param fileName The current file name. 
* @throws IOException */ - private void bulkloadRefFile(HTable table, Path bulkloadDirectory, String fileName) + private void bulkloadRefFile(Table table, Path bulkloadDirectory, String fileName) throws IOException { // bulkload the ref file try { LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf); - bulkload.doBulkLoad(bulkloadDirectory, table); + bulkload.doBulkLoad(bulkloadDirectory, (HTable)table); } catch (Exception e) { // delete the committed mob file deletePath(new Path(mobFamilyDir, fileName)); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java index d286b7259ff..37d4461fce8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MemStoreWrapper.java @@ -33,8 +33,10 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; +import org.apache.hadoop.hbase.client.BufferedMutator; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.mob.MobUtils; @@ -70,13 +72,13 @@ public class MemStoreWrapper { private SweepPartitionId partitionId; private Context context; private Configuration conf; - private HTable table; + private BufferedMutator table; private HColumnDescriptor hcd; private Path mobFamilyDir; private FileSystem fs; private CacheConfig cacheConfig; - public MemStoreWrapper(Context context, FileSystem fs, HTable table, HColumnDescriptor hcd, + public MemStoreWrapper(Context context, FileSystem fs, BufferedMutator table, HColumnDescriptor hcd, MemStore memstore, CacheConfig cacheConfig) throws IOException { this.memstore = memstore; this.context = context; @@ -153,16 +155,16 @@ public class MemStoreWrapper { scanner = snapshot.getScanner(); scanner.seek(KeyValueUtil.createFirstOnRow(HConstants.EMPTY_START_ROW)); cell = null; - Tag tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE, this.table.getTableName()); + Tag tableNameTag = new Tag(TagType.MOB_TABLE_NAME_TAG_TYPE, Bytes.toBytes(this.table.getName().toString())); while (null != (cell = scanner.next())) { KeyValue reference = MobUtils.createMobRefKeyValue(cell, referenceValue, tableNameTag); Put put = new Put(reference.getRowArray(), reference.getRowOffset(), reference.getRowLength()); put.add(reference); - table.put(put); + table.mutate(put); context.getCounter(SweepCounter.RECORDS_UPDATED).increment(1); } - table.flushCommits(); + table.flush(); scanner.close(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java index 73ca1a2ff61..cbefd8a5be4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepReducer.java @@ -43,8 +43,7 @@ import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import 
org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.*; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.mapreduce.TableInputFormat; @@ -93,7 +92,7 @@ public class SweepReducer extends Reducer { private Path familyDir; private CacheConfig cacheConfig; private long compactionBegin; - private HTable table; + private BufferedMutator table; private HColumnDescriptor family; private long mobCompactionDelay; private Path mobTableDir; @@ -101,6 +100,7 @@ public class SweepReducer extends Reducer { @Override protected void setup(Context context) throws IOException, InterruptedException { this.conf = context.getConfiguration(); + Connection c = ConnectionFactory.createConnection(this.conf); this.fs = FileSystem.get(conf); // the MOB_SWEEP_JOB_DELAY is ONE_DAY by default. Its value is only changed when testing. mobCompactionDelay = conf.getLong(SweepJob.MOB_SWEEP_JOB_DELAY, SweepJob.ONE_DAY); @@ -108,7 +108,7 @@ public class SweepReducer extends Reducer { String familyName = conf.get(TableInputFormat.SCAN_COLUMN_FAMILY); TableName tn = TableName.valueOf(tableName); this.familyDir = MobUtils.getMobFamilyPath(conf, tn, familyName); - HBaseAdmin admin = new HBaseAdmin(this.conf); + Admin admin = c.getAdmin(); try { family = admin.getTableDescriptor(tn).getFamily(Bytes.toBytes(familyName)); if (family == null) { @@ -128,10 +128,7 @@ public class SweepReducer extends Reducer { copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f); this.cacheConfig = new CacheConfig(copyOfConf); - table = new HTable(this.conf, Bytes.toBytes(tableName)); - table.setAutoFlush(false, false); - - table.setWriteBufferSize(1 * 1024 * 1024); // 1MB + table = c.getBufferedMutator(new BufferedMutatorParams(tn).writeBufferSize(1*1024*1024)); memstore = new MemStoreWrapper(context, fs, table, family, new DefaultMemStore(), cacheConfig); // The start time of the sweep tool. 
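For context, the hunks above replace direct HTable/HBaseAdmin construction with the Connection-based client API (ConnectionFactory, Admin, Table, BufferedMutator) and switch internal scans to ScannerContext batch limits. The following is a minimal, illustrative sketch of that client pattern; it is not part of the patch, and the table name, column family, qualifier, and values are hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.BufferedMutator;
    import org.apache.hadoop.hbase.client.BufferedMutatorParams;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ConnectionApiSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("demo");              // hypothetical table name
        try (Connection connection = ConnectionFactory.createConnection(conf);
             BufferedMutator mutator = connection.getBufferedMutator(
                 new BufferedMutatorParams(tn).writeBufferSize(1 * 1024 * 1024))) {
          Put put = new Put(Bytes.toBytes("row1"));
          put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          mutator.mutate(put);  // buffered client-side write; replaces HTable#put with autoflush off
          mutator.flush();      // replaces HTable#flushCommits()
        }
      }
    }

This mirrors what MemStoreWrapper and SweepReducer now do: a BufferedMutator with a 1 MB write buffer stands in for the old HTable configured with setAutoFlush(false) and setWriteBufferSize(1*1024*1024).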
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceAuditor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceAuditor.java index 726b040f3f6..0afafe46d94 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceAuditor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceAuditor.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.namespace; import java.io.IOException; -import java.io.InterruptedIOException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -31,7 +30,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.quotas.QuotaExceededException; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import com.google.common.annotations.VisibleForTesting; @@ -55,19 +53,6 @@ public class NamespaceAuditor { public void start() throws IOException { stateManager.start(); - long startTime = EnvironmentEdgeManager.currentTime(); - int timeout = masterServices.getConfiguration().getInt(NS_AUDITOR_INIT_TIMEOUT, - DEFAULT_NS_AUDITOR_INIT_TIMEOUT); - try { - while (!stateManager.isInitialized()) { - if (EnvironmentEdgeManager.currentTime() - startTime + 1000 > timeout) { - throw new HBaseIOException("Timed out waiting for namespace auditor to be initialized."); - } - Thread.sleep(1000); - } - } catch (InterruptedException e) { - throw (InterruptedIOException) new InterruptedIOException().initCause(e); - } LOG.info("NamespaceAuditor started."); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java index c34a12340f2..ea1e21a8db0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceStateManager.java @@ -19,21 +19,19 @@ package org.apache.hadoop.hbase.namespace; import java.io.IOException; import java.util.List; -import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.MetaScanner; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.TableNamespaceManager; +import org.apache.hadoop.hbase.quotas.QuotaExceededException; import org.apache.hadoop.hbase.util.Bytes; /** @@ -125,13 +123,14 @@ class NamespaceStateManager { NamespaceTableAndRegionInfo currentStatus; currentStatus = getState(nspdesc.getName()); if ((currentStatus.getTables().size()) >= TableNamespaceManager.getMaxTables(nspdesc)) { - throw new DoNotRetryIOException("The table " + table.getNameAsString() + throw new QuotaExceededException("The table " + table.getNameAsString() + "cannot be created as it would exceed maximum number of tables allowed " - + " in the 
namespace."); + + " in the namespace. The total number of tables permitted is " + + TableNamespaceManager.getMaxTables(nspdesc)); } if ((currentStatus.getRegionCount() + numRegions) > TableNamespaceManager .getMaxRegions(nspdesc)) { - throw new DoNotRetryIOException("The table " + table.getNameAsString() + throw new QuotaExceededException("The table " + table.getNameAsString() + " is not allowed to have " + numRegions + " regions. The total number of regions permitted is only " + TableNamespaceManager.getMaxRegions(nspdesc) @@ -185,30 +184,22 @@ class NamespaceStateManager { /** * Initialize namespace state cache by scanning meta table. */ - void initialize() { - try { - List namespaces = this.master.listNamespaceDescriptors(); - for (NamespaceDescriptor namespace : namespaces) { - addNamespace(namespace.getName()); - List tables = this.master.listTableNamesByNamespace(namespace.getName()); - for (TableName table : tables) { - int regionCount = 0; - Map regions = MetaScanner.allTableRegions( - this.master.getConnection(), table); - for (HRegionInfo info : regions.keySet()) { - if (!info.isSplit()) { - regionCount++; - } - } - addTable(table, regionCount); + private void initialize() throws IOException { + List namespaces = this.master.listNamespaceDescriptors(); + for (NamespaceDescriptor namespace : namespaces) { + addNamespace(namespace.getName()); + List tables = this.master.listTableNamesByNamespace(namespace.getName()); + for (TableName table : tables) { + if (table.isSystemTable()) { + continue; } + List regions = + MetaTableAccessor.getTableRegions(this.master.getConnection(), table, true); + addTable(table, regions.size()); } - LOG.info("Finished updating state of " + nsStateCache.size() + " namespaces. "); - initialized = true; - } catch (IOException e) { - LOG.error("Error while update namespace state.", e); - initialized = false; } + LOG.info("Finished updating state of " + nsStateCache.size() + " namespaces. 
"); + initialized = true; } boolean isInitialized() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceTableAndRegionInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceTableAndRegionInfo.java index 66fcaa61b84..86651e42c10 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceTableAndRegionInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceTableAndRegionInfo.java @@ -24,10 +24,10 @@ import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.TableName; import com.google.common.base.Joiner; +import org.apache.hadoop.hbase.classification.InterfaceAudience; /** * NamespaceTableAndRegionInfo is a helper class that contains information diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java index a13292edb9b..387c2e45f45 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ZKProcedureMemberRpcs.java @@ -200,7 +200,6 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { Subprocedure subproc = null; try { byte[] data = ZKUtil.getData(zkController.getWatcher(), path); - LOG.debug("start proc data length is " + data.length); if (!ProtobufUtil.isPBMagicPrefix(data)) { String msg = "Data in for starting procuedure " + opName + " is illegally formatted (no pb magic). " + @@ -208,6 +207,7 @@ public class ZKProcedureMemberRpcs implements ProcedureMemberRpcs { LOG.error(msg); throw new IllegalArgumentException(msg); } + LOG.debug("start proc data length is " + data.length); data = Arrays.copyOfRange(data, ProtobufUtil.lengthOfPBMagic(), data.length); LOG.debug("Found data for znode:" + path); subproc = member.createSubprocedure(opName, data); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/FlushTableSubprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/FlushTableSubprocedure.java index d032ebadb43..8d64f2aafc7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/FlushTableSubprocedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/FlushTableSubprocedure.java @@ -28,7 +28,8 @@ import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.procedure.ProcedureMember; import org.apache.hadoop.hbase.procedure.Subprocedure; import org.apache.hadoop.hbase.procedure.flush.RegionServerFlushTableProcedureManager.FlushTableSubprocedurePool; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.Region.Operation; /** * This flush region implementation uses the distributed procedure framework to flush @@ -40,12 +41,12 @@ public class FlushTableSubprocedure extends Subprocedure { private static final Log LOG = LogFactory.getLog(FlushTableSubprocedure.class); private final String table; - private final List regions; + private final List regions; private final FlushTableSubprocedurePool taskManager; public FlushTableSubprocedure(ProcedureMember member, ForeignExceptionDispatcher errorListener, long wakeFrequency, long timeout, - List regions, 
String table, + List regions, String table, FlushTableSubprocedurePool taskManager) { super(member, table, errorListener, wakeFrequency, timeout); this.table = table; @@ -54,8 +55,8 @@ public class FlushTableSubprocedure extends Subprocedure { } private static class RegionFlushTask implements Callable { - HRegion region; - RegionFlushTask(HRegion region) { + Region region; + RegionFlushTask(Region region) { this.region = region; } @@ -65,7 +66,7 @@ public class FlushTableSubprocedure extends Subprocedure { region.startRegionOperation(); try { LOG.debug("Flush region " + region.toString() + " started..."); - region.flushcache(); + region.flush(true); } finally { LOG.debug("Closing region operation on " + region); region.closeRegionOperation(); @@ -89,7 +90,7 @@ public class FlushTableSubprocedure extends Subprocedure { } // Add all hfiles already existing in region. - for (HRegion region : regions) { + for (Region region : regions) { // submit one task per region for parallelize by region. taskManager.submitTask(new RegionFlushTask(region)); monitor.rethrowException(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java index e6f074df43f..7664deedaca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java @@ -44,8 +44,8 @@ import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager; import org.apache.hadoop.hbase.procedure.Subprocedure; import org.apache.hadoop.hbase.procedure.SubprocedureFactory; import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; @@ -137,7 +137,7 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur } // check to see if this server is hosting any regions for the table - List involvedRegions; + List involvedRegions; try { involvedRegions = getRegionsToFlush(table); } catch (IOException e1) { @@ -172,7 +172,7 @@ public class RegionServerFlushTableProcedureManager extends RegionServerProcedur * @return the list of online regions. Empty list is returned if no regions. 
* @throws IOException */ - private List getRegionsToFlush(String table) throws IOException { + private List getRegionsToFlush(String table) throws IOException { return rss.getOnlineRegions(TableName.valueOf(table)); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java index 9893fc87e10..5fe5f8cf847 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.master.MasterServices; -import org.apache.hadoop.hbase.master.handler.CreateTableHandler; +import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure; import org.apache.hadoop.hbase.namespace.NamespaceAuditor; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest; @@ -444,14 +444,11 @@ public class MasterQuotaManager implements RegionStateListener { new HRegionInfo(QuotaUtil.QUOTA_TABLE_NAME) }; - masterServices.getExecutorService() - .submit(new CreateTableHandler(masterServices, - masterServices.getMasterFileSystem(), - QuotaUtil.QUOTA_TABLE_DESC, - masterServices.getConfiguration(), - newRegions, - masterServices) - .prepare()); + masterServices.getMasterProcedureExecutor() + .submitProcedure(new CreateTableProcedure( + masterServices.getMasterProcedureExecutor().getEnvironment(), + QuotaUtil.QUOTA_TABLE_DESC, + newRegions)); } private static class NamedLock { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerQuotaManager.java index 836025f87dc..0a63c139b9f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerQuotaManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerQuotaManager.java @@ -27,9 +27,9 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.RpcScheduler; -import org.apache.hadoop.hbase.ipc.RequestContext; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.security.UserGroupInformation; @@ -129,7 +129,7 @@ public class RegionServerQuotaManager { * @return the OperationQuota * @throws ThrottlingException if the operation cannot be executed due to quota exceeded. */ - public OperationQuota checkQuota(final HRegion region, + public OperationQuota checkQuota(final Region region, final OperationQuota.OperationType type) throws IOException, ThrottlingException { switch (type) { case SCAN: return checkQuota(region, 0, 0, 1); @@ -148,7 +148,7 @@ public class RegionServerQuotaManager { * @return the OperationQuota * @throws ThrottlingException if the operation cannot be executed due to quota exceeded. 
*/ - public OperationQuota checkQuota(final HRegion region, + public OperationQuota checkQuota(final Region region, final List actions) throws IOException, ThrottlingException { int numWrites = 0; int numReads = 0; @@ -173,12 +173,13 @@ public class RegionServerQuotaManager { * @return the OperationQuota * @throws ThrottlingException if the operation cannot be executed due to quota exceeded. */ - private OperationQuota checkQuota(final HRegion region, + private OperationQuota checkQuota(final Region region, final int numWrites, final int numReads, final int numScans) throws IOException, ThrottlingException { + User user = RpcServer.getRequestUser(); UserGroupInformation ugi; - if (RequestContext.isInRequestContext()) { - ugi = RequestContext.getRequestUser().getUGI(); + if (user != null) { + ugi = user.getUGI(); } else { ugi = User.getCurrent().getUGI(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionStateListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionStateListener.java index 6b954ac3708..368e21e838c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionStateListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionStateListener.java @@ -19,8 +19,8 @@ package org.apache.hadoop.hbase.quotas; import java.io.IOException; -import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.classification.InterfaceAudience; /** * The listener interface for receiving region state events. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionStateListener.java~HEAD_0 b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionStateListener.java~HEAD_0 deleted file mode 100644 index 6b954ac3708..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionStateListener.java~HEAD_0 +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.quotas; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.HRegionInfo; - -/** - * The listener interface for receiving region state events. - */ -@InterfaceAudience.Private -public interface RegionStateListener { - - /** - * Process region split event. - * - * @param hri An instance of HRegionInfo - * @throws IOException - */ - void onRegionSplit(HRegionInfo hri) throws IOException; - - /** - * Process region split reverted event. - * - * @param hri An instance of HRegionInfo - * @throws IOException Signals that an I/O exception has occurred. - */ - void onRegionSplitReverted(HRegionInfo hri) throws IOException; - - /** - * Process region merge event. 
- * - * @param hri An instance of HRegionInfo - * @throws IOException - */ - void onRegionMerged(HRegionInfo hri) throws IOException; -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionStateListener.java~jon_master b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionStateListener.java~jon_master deleted file mode 100644 index 6b954ac3708..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionStateListener.java~jon_master +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.quotas; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.HRegionInfo; - -/** - * The listener interface for receiving region state events. - */ -@InterfaceAudience.Private -public interface RegionStateListener { - - /** - * Process region split event. - * - * @param hri An instance of HRegionInfo - * @throws IOException - */ - void onRegionSplit(HRegionInfo hri) throws IOException; - - /** - * Process region split reverted event. - * - * @param hri An instance of HRegionInfo - * @throws IOException Signals that an I/O exception has occurred. - */ - void onRegionSplitReverted(HRegionInfo hri) throws IOException; - - /** - * Process region merge event. - * - * @param hri An instance of HRegionInfo - * @throws IOException - */ - void onRegionMerged(HRegionInfo hri) throws IOException; -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionStateListener.java~master b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionStateListener.java~master deleted file mode 100644 index 6b954ac3708..00000000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionStateListener.java~master +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hbase.quotas; - -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.HRegionInfo; - -/** - * The listener interface for receiving region state events. - */ -@InterfaceAudience.Private -public interface RegionStateListener { - - /** - * Process region split event. - * - * @param hri An instance of HRegionInfo - * @throws IOException - */ - void onRegionSplit(HRegionInfo hri) throws IOException; - - /** - * Process region split reverted event. - * - * @param hri An instance of HRegionInfo - * @throws IOException Signals that an I/O exception has occurred. - */ - void onRegionSplitReverted(HRegionInfo hri) throws IOException; - - /** - * Process region merge event. - * - * @param hri An instance of HRegionInfo - * @throws IOException - */ - void onRegionMerged(HRegionInfo hri) throws IOException; -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java index 8ca7e6ba0f0..79687a9a209 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java @@ -19,12 +19,10 @@ package org.apache.hadoop.hbase.quotas; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota; import org.apache.hadoop.hbase.quotas.OperationQuota.AvgOperationSize; import org.apache.hadoop.hbase.quotas.OperationQuota.OperationType; @@ -36,8 +34,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @InterfaceAudience.Private @InterfaceStability.Evolving public class TimeBasedLimiter implements QuotaLimiter { - private static final Log LOG = LogFactory.getLog(TimeBasedLimiter.class); - private long writeLastTs = 0; private long readLastTs = 0; @@ -110,7 +106,8 @@ public class TimeBasedLimiter implements QuotaLimiter { ThrottlingException.throwNumRequestsExceeded(reqsLimiter.waitInterval()); } if (!reqSizeLimiter.canExecute(now, lastTs, writeSize + readSize)) { - ThrottlingException.throwNumRequestsExceeded(reqSizeLimiter.waitInterval(writeSize+readSize)); + ThrottlingException.throwRequestSizeExceeded(reqSizeLimiter + .waitInterval(writeSize + readSize)); } if (writeSize > 0) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java index ddeabfa8190..29228db7c18 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java @@ -176,10 +176,11 @@ class AnnotationReadingPriorityFunction implements PriorityFunction { if (hasRegion != null && (Boolean)hasRegion.invoke(param, (Object[])null)) { Method getRegion = methodMap.get("getRegion").get(rpcArgClass); regionSpecifier = (RegionSpecifier)getRegion.invoke(param, (Object[])null); - HRegion 
region = rpcServices.getRegion(regionSpecifier); + Region region = rpcServices.getRegion(regionSpecifier); if (region.getRegionInfo().isSystemTable()) { if (LOG.isTraceEnabled()) { - LOG.trace("High priority because region=" + region.getRegionNameAsString()); + LOG.trace("High priority because region=" + + region.getRegionInfo().getRegionNameAsString()); } return HConstants.SYSTEMTABLE_QOS; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index 3e5003661f3..dbe4d0103d2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -75,6 +75,10 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi // Configuration keys for merge threads public final static String MERGE_THREADS = "hbase.regionserver.thread.merge"; public final static int MERGE_THREADS_DEFAULT = 1; + + public static final String REGION_SERVER_REGION_SPLIT_LIMIT = + "hbase.regionserver.regionSplitLimit"; + public static final int DEFAULT_REGION_SERVER_REGION_SPLIT_LIMIT= 1000; private final HRegionServer server; private final Configuration conf; @@ -98,8 +102,8 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi super(); this.server = server; this.conf = server.getConfiguration(); - this.regionSplitLimit = conf.getInt("hbase.regionserver.regionSplitLimit", - Integer.MAX_VALUE); + this.regionSplitLimit = conf.getInt(REGION_SERVER_REGION_SPLIT_LIMIT, + DEFAULT_REGION_SERVER_REGION_SPLIT_LIMIT); int largeThreads = Math.max(1, conf.getInt( LARGE_COMPACTION_THREADS, LARGE_COMPACTION_THREADS_DEFAULT)); @@ -214,8 +218,8 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi return queueLists.toString(); } - public synchronized void requestRegionsMerge(final HRegion a, - final HRegion b, final boolean forcible) { + public synchronized void requestRegionsMerge(final Region a, + final Region b, final boolean forcible) { try { mergePool.execute(new RegionMergeRequest(a, b, this.server, forcible)); if (LOG.isDebugEnabled()) { @@ -228,10 +232,10 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi } } - public synchronized boolean requestSplit(final HRegion r) { + public synchronized boolean requestSplit(final Region r) { // don't split regions that are blocking - if (shouldSplitRegion() && r.getCompactPriority() >= Store.PRIORITY_USER) { - byte[] midKey = r.checkSplit(); + if (shouldSplitRegion() && ((HRegion)r).getCompactPriority() >= Store.PRIORITY_USER) { + byte[] midKey = ((HRegion)r).checkSplit(); if (midKey != null) { requestSplit(r, midKey); return true; @@ -240,12 +244,12 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi return false; } - public synchronized void requestSplit(final HRegion r, byte[] midKey) { + public synchronized void requestSplit(final Region r, byte[] midKey) { if (midKey == null) { - LOG.debug("Region " + r.getRegionNameAsString() + + LOG.debug("Region " + r.getRegionInfo().getRegionNameAsString() + " not splittable because midkey=null"); - if (r.shouldForceSplit()) { - r.clearSplit(); + if (((HRegion)r).shouldForceSplit()) { + ((HRegion)r).clearSplit(); } return; } @@ -260,36 +264,36 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi } @Override - 
public synchronized List requestCompaction(final HRegion r, final String why) + public synchronized List requestCompaction(final Region r, final String why) throws IOException { return requestCompaction(r, why, null); } @Override - public synchronized List requestCompaction(final HRegion r, final String why, + public synchronized List requestCompaction(final Region r, final String why, List> requests) throws IOException { return requestCompaction(r, why, Store.NO_PRIORITY, requests); } @Override - public synchronized CompactionRequest requestCompaction(final HRegion r, final Store s, + public synchronized CompactionRequest requestCompaction(final Region r, final Store s, final String why, CompactionRequest request) throws IOException { return requestCompaction(r, s, why, Store.NO_PRIORITY, request); } @Override - public synchronized List requestCompaction(final HRegion r, final String why, + public synchronized List requestCompaction(final Region r, final String why, int p, List> requests) throws IOException { return requestCompactionInternal(r, why, p, requests, true); } - private List requestCompactionInternal(final HRegion r, final String why, + private List requestCompactionInternal(final Region r, final String why, int p, List> requests, boolean selectNow) throws IOException { // not a special compaction request, so make our own list List ret = null; if (requests == null) { ret = selectNow ? new ArrayList(r.getStores().size()) : null; - for (Store s : r.getStores().values()) { + for (Store s : r.getStores()) { CompactionRequest cr = requestCompactionInternal(r, s, why, p, null, selectNow); if (selectNow) ret.add(cr); } @@ -303,30 +307,30 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi return ret; } - public CompactionRequest requestCompaction(final HRegion r, final Store s, + public CompactionRequest requestCompaction(final Region r, final Store s, final String why, int priority, CompactionRequest request) throws IOException { return requestCompactionInternal(r, s, why, priority, request, true); } public synchronized void requestSystemCompaction( - final HRegion r, final String why) throws IOException { + final Region r, final String why) throws IOException { requestCompactionInternal(r, why, Store.NO_PRIORITY, null, false); } public void requestSystemCompaction( - final HRegion r, final Store s, final String why) throws IOException { + final Region r, final Store s, final String why) throws IOException { requestCompactionInternal(r, s, why, Store.NO_PRIORITY, null, false); } /** - * @param r HRegion store belongs to + * @param r region store belongs to * @param s Store to request compaction on * @param why Why compaction requested -- used in debug messages * @param priority override the default priority (NO_PRIORITY == decide) * @param request custom compaction request. Can be null in which case a simple * compaction will be used. */ - private synchronized CompactionRequest requestCompactionInternal(final HRegion r, final Store s, + private synchronized CompactionRequest requestCompactionInternal(final Region r, final Store s, final String why, int priority, CompactionRequest request, boolean selectNow) throws IOException { if (this.server.isStopped() @@ -354,12 +358,12 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi return selectNow ? 
compaction.getRequest() : null; } - private CompactionContext selectCompaction(final HRegion r, final Store s, + private CompactionContext selectCompaction(final Region r, final Store s, int priority, CompactionRequest request) throws IOException { CompactionContext compaction = s.requestCompaction(priority, request); if (compaction == null) { if(LOG.isDebugEnabled()) { - LOG.debug("Not compacting " + r.getRegionNameAsString() + + LOG.debug("Not compacting " + r.getRegionInfo().getRegionNameAsString() + " because compaction request was cancelled"); } return null; @@ -427,6 +431,10 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi } private boolean shouldSplitRegion() { + if(server.getNumberOfOnlineRegions() > 0.9*regionSplitLimit) { + LOG.warn("Total number of regions is approaching the upper limit " + regionSplitLimit + ". " + + "Please consider taking a look at http://hbase.apache.org/book.html#ops.regionmgt"); + } return (regionSplitLimit > server.getNumberOfOnlineRegions()); } @@ -446,11 +454,11 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi private int queuedPriority; private ThreadPoolExecutor parent; - public CompactionRunner(Store store, HRegion region, + public CompactionRunner(Store store, Region region, CompactionContext compaction, ThreadPoolExecutor parent) { super(); this.store = store; - this.region = region; + this.region = (HRegion)region; this.compaction = compaction; this.queuedPriority = (this.compaction == null) ? store.getCompactPriority() : compaction.getRequest().getPriority(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java index 93a73e96970..d40b21d74b9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java @@ -34,7 +34,7 @@ public interface CompactionRequestor { * compactions were started * @throws IOException */ - List requestCompaction(final HRegion r, final String why) + List requestCompaction(final Region r, final String why) throws IOException; /** @@ -48,7 +48,7 @@ public interface CompactionRequestor { * @throws IOException */ List requestCompaction( - final HRegion r, final String why, List> requests + final Region r, final String why, List> requests ) throws IOException; @@ -56,13 +56,13 @@ public interface CompactionRequestor { * @param r Region to compact * @param s Store within region to compact * @param why Why compaction was requested -- used in debug messages - * @param request custom compaction request for the {@link HRegion} and {@link Store}. Custom + * @param request custom compaction request for the {@link Region} and {@link Store}. Custom * request must be null or be constructed with matching region and store. * @return The created {@link CompactionRequest} or null if no compaction was started. 
* @throws IOException */ CompactionRequest requestCompaction( - final HRegion r, final Store s, final String why, CompactionRequest request + final Region r, final Store s, final String why, CompactionRequest request ) throws IOException; /** @@ -77,7 +77,7 @@ public interface CompactionRequestor { * @throws IOException */ List requestCompaction( - final HRegion r, final String why, int pri, List> requests + final Region r, final String why, int pri, List> requests ) throws IOException; /** @@ -85,12 +85,12 @@ public interface CompactionRequestor { * @param s Store within region to compact * @param why Why compaction was requested -- used in debug messages * @param pri Priority of this compaction. minHeap. <=0 is critical - * @param request custom compaction request to run. {@link Store} and {@link HRegion} for the + * @param request custom compaction request to run. {@link Store} and {@link Region} for the * request must match the region and store specified here. * @return The created {@link CompactionRequest} or null if no compaction was started * @throws IOException */ CompactionRequest requestCompaction( - final HRegion r, final Store s, final String why, int pri, CompactionRequest request + final Region r, final Store s, final String why, int pri, CompactionRequest request ) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java index fba5b2a2544..66ef712519c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java @@ -23,6 +23,8 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; +import java.util.Random; + /** * A {@link RegionSplitPolicy} implementation which splits a region * as soon as any of its store files exceeds a maximum configurable @@ -34,6 +36,8 @@ import org.apache.hadoop.hbase.HTableDescriptor; */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class ConstantSizeRegionSplitPolicy extends RegionSplitPolicy { + private static final Random RANDOM = new Random(); + private long desiredMaxFileSize; @Override @@ -48,6 +52,8 @@ public class ConstantSizeRegionSplitPolicy extends RegionSplitPolicy { this.desiredMaxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE); } + double jitter = conf.getDouble("hbase.hregion.max.filesize.jitter", 0.25D); + this.desiredMaxFileSize += (long)(desiredMaxFileSize * (RANDOM.nextFloat() - 0.5D) * jitter); } @Override @@ -55,7 +61,7 @@ public class ConstantSizeRegionSplitPolicy extends RegionSplitPolicy { boolean force = region.shouldForceSplit(); boolean foundABigStore = false; - for (Store store : region.getStores().values()) { + for (Store store : region.getStores()) { // If any of the stores are unable to split (eg they contain reference files) // then don't split if ((!store.canSplit())) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index 48b78c2305b..3da0c0b7ebd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.util.CollectionBackedScanner; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ReflectionUtils; +import org.apache.htrace.Trace; /** * The MemStore holds in-memory modifications to the Store. Modifications @@ -208,6 +209,11 @@ public class DefaultMemStore implements MemStore { return this.snapshotSize > 0 ? this.snapshotSize : keySize(); } + @Override + public long getSnapshotSize() { + return this.snapshotSize; + } + /** * Write an update * @param cell @@ -462,6 +468,7 @@ public class DefaultMemStore implements MemStore { * @param now * @return Timestamp */ + @Override public long updateColumnValue(byte[] row, byte[] family, byte[] qualifier, @@ -524,7 +531,7 @@ public class DefaultMemStore implements MemStore { * atomically. Scans will only see each KeyValue update as atomic. * * @param cells - * @param readpoint readpoint below which we can safely remove duplicate KVs + * @param readpoint readpoint below which we can safely remove duplicate KVs * @return change in memstore size */ @Override @@ -581,7 +588,7 @@ public class DefaultMemStore implements MemStore { // only remove Puts that concurrent scanners cannot possibly see if (cur.getTypeByte() == KeyValue.Type.Put.getCode() && cur.getSequenceId() <= readpoint) { - if (versionsVisible > 1) { + if (versionsVisible >= 1) { // if we get here we have seen at least one version visible to the oldest scanner, // which means we can prove that no scanner will see this version @@ -731,6 +738,9 @@ public class DefaultMemStore implements MemStore { this.snapshotAllocatorAtCreation = snapshotAllocator; this.snapshotAllocatorAtCreation.incScannerCount(); } + if (Trace.isTracing() && Trace.currentSpan() != null) { + Trace.currentSpan().addTimelineAnnotation("Creating MemStoreScanner"); + } } /** @@ -1031,7 +1041,7 @@ public class DefaultMemStore implements MemStore { public long size() { return heapSize(); } - + /** * Code to help figure if our approximation of object heap sizes is close * enough. See hbase-900. 
Fills memstores then waits so user can heap diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java index 73b8cb9334f..8ff4840aa22 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java @@ -85,7 +85,7 @@ public class DefaultStoreFlusher extends StoreFlusher { scanner.close(); } LOG.info("Flushed, sequenceid=" + cacheFlushId +", memsize=" - + StringUtils.humanReadableInt(snapshot.getSize()) + + + StringUtils.TraditionalBinaryPrefix.long2String(snapshot.getSize(), "", 1) + ", hasBloomFilter=" + writer.hasGeneralBloom() + ", into tmp file " + writer.getPath()); result.add(writer.getPath()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java index 470d36ac487..b779e223ad7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java @@ -56,10 +56,6 @@ public class ExplicitColumnTracker implements ColumnTracker { private final int maxVersions; private final int minVersions; - // hint for the tracker about how many KVs we will attempt to search via next() - // before we schedule a (re)seek operation - private final int lookAhead; - /** * Contains the list of columns that the ExplicitColumnTracker is tracking. * Each ColumnCount instance also tracks how many versions of the requested @@ -72,7 +68,6 @@ public class ExplicitColumnTracker implements ColumnTracker { * Used to eliminate duplicates. */ private long latestTSOfCurrentColumn; private long oldestStamp; - private int skipCount; /** * Default constructor. @@ -81,14 +76,11 @@ public class ExplicitColumnTracker implements ColumnTracker { * @param maxVersions maximum versions to return per column * @param oldestUnexpiredTS the oldest timestamp we are interested in, * based on TTL - * @param lookAhead number of KeyValues to look ahead via next before - * (re)seeking */ public ExplicitColumnTracker(NavigableSet columns, int minVersions, - int maxVersions, long oldestUnexpiredTS, int lookAhead) { + int maxVersions, long oldestUnexpiredTS) { this.maxVersions = maxVersions; this.minVersions = minVersions; - this.lookAhead = lookAhead; this.oldestStamp = oldestUnexpiredTS; this.columns = new ColumnCount[columns.size()]; int i=0; @@ -144,8 +136,7 @@ public class ExplicitColumnTracker implements ColumnTracker { if (ret > 0) { // The current KV is smaller than the column the ExplicitColumnTracker // is interested in, so seek to that column of interest. - return this.skipCount++ < this.lookAhead ? ScanQueryMatcher.MatchCode.SKIP - : ScanQueryMatcher.MatchCode.SEEK_NEXT_COL; + return ScanQueryMatcher.MatchCode.SEEK_NEXT_COL; } // The current KV is bigger than the column the ExplicitColumnTracker @@ -154,7 +145,6 @@ public class ExplicitColumnTracker implements ColumnTracker { // column of interest, and check again. if (ret <= -1) { ++this.index; - this.skipCount = 0; if (done()) { // No more to match, do not include, done with this row. 
return ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW; // done_row @@ -179,7 +169,6 @@ public class ExplicitColumnTracker implements ColumnTracker { if (count >= maxVersions || (count >= minVersions && isExpired(timestamp))) { // Done with versions for this column ++this.index; - this.skipCount = 0; resetTS(); if (done()) { // We have served all the requested columns. @@ -198,7 +187,6 @@ public class ExplicitColumnTracker implements ColumnTracker { // Called between every row. public void reset() { this.index = 0; - this.skipCount = 0; this.column = this.columns[this.index]; for(ColumnCount col : this.columns) { col.setCount(0); @@ -238,7 +226,6 @@ public class ExplicitColumnTracker implements ColumnTracker { resetTS(); if (compare <= 0) { ++this.index; - this.skipCount = 0; if (done()) { // Will not hit any more columns in this storefile this.column = null; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FavoredNodesForRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FavoredNodesForRegion.java index d978a2d11f1..f516ecd8f01 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FavoredNodesForRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FavoredNodesForRegion.java @@ -21,15 +21,18 @@ package org.apache.hadoop.hbase.regionserver; import java.net.InetSocketAddress; import java.util.List; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName; /** * Abstraction that allows different modules in RegionServer to update/get * the favored nodes information for regions. */ -@InterfaceAudience.Private -interface FavoredNodesForRegion { +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving +public interface FavoredNodesForRegion { /** * Used to update the favored nodes mapping when required. * @param encodedRegionName diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequestListener.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequestListener.java index 80ac07ba24c..0e6bc4fb736 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequestListener.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequestListener.java @@ -32,5 +32,5 @@ public interface FlushRequestListener { * @param type The type of flush. (ie. Whether a normal flush or flush because of global heap preassure) * @param region The region for which flush is requested */ - void flushRequested(FlushType type, HRegion region); + void flushRequested(FlushType type, Region region); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java index 75174544877..c7e155a3bbc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushRequester.java @@ -29,21 +29,21 @@ public interface FlushRequester { /** * Tell the listener the cache needs to be flushed. * - * @param region the HRegion requesting the cache flush + * @param region the Region requesting the cache flush * @param forceFlushAllStores whether we want to flush all stores. 
e.g., when request from log * rolling. */ - void requestFlush(HRegion region, boolean forceFlushAllStores); + void requestFlush(Region region, boolean forceFlushAllStores); /** * Tell the listener the cache needs to be flushed after a delay * - * @param region the HRegion requesting the cache flush + * @param region the Region requesting the cache flush * @param delay after how much time should the flush happen * @param forceFlushAllStores whether we want to flush all stores. e.g., when request from log * rolling. */ - void requestDelayedFlush(HRegion region, long delay, boolean forceFlushAllStores); + void requestDelayedFlush(Region region, long delay, boolean forceFlushAllStores); /** * Register a FlushRequestListener diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index ab0165def0e..6684309b3cc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.EOFException; +import java.io.FileNotFoundException; import java.io.IOException; import java.io.InterruptedIOException; import java.io.UnsupportedEncodingException; @@ -62,7 +63,6 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantReadWriteLock; -import com.google.protobuf.ByteString; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -101,6 +101,7 @@ import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; @@ -130,17 +131,26 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.WALProtos; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor; import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl.WriteEntry; +import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; +import 
org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; -import org.apache.hadoop.hbase.regionserver.compactions.NoLimitCompactionThroughputController; import org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputController; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionThroughputControllerFactory; +import org.apache.hadoop.hbase.regionserver.compactions.NoLimitCompactionThroughputController; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; +import org.apache.hadoop.hbase.regionserver.wal.ReplayHLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; @@ -166,6 +176,8 @@ import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay; import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.util.StringUtils; +import org.apache.htrace.Trace; +import org.apache.htrace.TraceScope; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Optional; @@ -173,50 +185,16 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.io.Closeables; +import com.google.protobuf.ByteString; import com.google.protobuf.Descriptors; import com.google.protobuf.Message; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; +import com.google.protobuf.TextFormat; -/** - * HRegion stores data for a certain region of a table. It stores all columns - * for each row. A given table consists of one or more HRegions. - * - *
We maintain multiple HStores for a single HRegion. - * - *
An Store is a set of rows with some column data; together, - * they make up all the data for the rows. - * - *
Each HRegion has a 'startKey' and 'endKey'. - *
The first is inclusive, the second is exclusive (except for - * the final region) The endKey of region 0 is the same as - * startKey for region 1 (if it exists). The startKey for the - * first region is null. The endKey for the final region is null. - * - *
Locking at the HRegion level serves only one purpose: preventing the - * region from being closed (and consequently split) while other operations - * are ongoing. Each row level operation obtains both a row lock and a region - * read lock for the duration of the operation. While a scanner is being - * constructed, getScanner holds a read lock. If the scanner is successfully - * constructed, it holds a read lock until it is closed. A close takes out a - * write lock and consequently will block for ongoing operations and will block - * new operations from starting while the close is in progress. - * - *
An HRegion is defined by its table and its key extent. - * - *
It consists of at least one Store. The number of Stores should be - * configurable, so that data which is accessed together is stored in the same - * Store. Right now, we approximate that by building a single Store for - * each column family. (This config info will be communicated via the - * tabledesc.) - * - *
      The HTableDescriptor contains metainfo about the HRegion's table. - * regionName is a unique identifier for this HRegion. (startKey, endKey] - * defines the keyspace for this HRegion. - */ @InterfaceAudience.Private -public class HRegion implements HeapSize, PropagatingConfigurationObserver { // , Writable{ +public class HRegion implements HeapSize, PropagatingConfigurationObserver, Region { public static final Log LOG = LogFactory.getLog(HRegion.class); public static final String LOAD_CFS_ON_DEMAND_CONFIG_KEY = @@ -240,8 +218,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // * The max sequence id of flushed data on this region. Used doing some rough calculations on * whether time to flush or not. */ - protected volatile long maxFlushedSeqId = -1L; + private volatile long maxFlushedSeqId = HConstants.NO_SEQNUM; + /** + * Record the sequence id of last flush operation. + */ + private volatile long lastFlushOpSeqId = HConstants.NO_SEQNUM; /** * Region scoped edit sequence Id. Edits to this region are GUARANTEED to appear in the WAL * file in this sequence id's order; i.e. edit #2 will be in the WAL after edit #1. @@ -257,15 +239,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // private final AtomicLong sequenceId = new AtomicLong(-1L); /** - * Operation enum is used in {@link HRegion#startRegionOperation} to provide operation context for - * startRegionOperation to possibly invoke different checks before any region operations. Not all - * operations have to be defined here. It's only needed when a special check is need in - * startRegionOperation + * The sequence id of the last replayed open region event from the primary region. This is used + * to skip entries before this due to the possibility of replay edits coming out of order from + * replication. */ - public enum Operation { - ANY, GET, PUT, DELETE, SCAN, APPEND, INCREMENT, SPLIT_REGION, MERGE_REGION, BATCH_MUTATE, - REPLAY_BATCH_MUTATE, COMPACT_REGION - } + protected volatile long lastReplayedOpenRegionSeqId = -1L; + protected volatile long lastReplayedCompactionSeqId = -1L; ////////////////////////////////////////////////////////////////////////////// // Members @@ -302,19 +281,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // Number of requests blocked by memstore size. private final Counter blockedRequestsCount = new Counter(); - /** - * @return the number of blocked requests count. - */ - public long getBlockedRequestsCount() { - return this.blockedRequestsCount.get(); - } - // Compaction counters final AtomicLong compactionsFinished = new AtomicLong(0L); final AtomicLong compactionNumFilesCompacted = new AtomicLong(0L); final AtomicLong compactionNumBytesCompacted = new AtomicLong(0L); - private final WAL wal; private final HRegionFileSystem fs; protected final Configuration conf; @@ -369,6 +340,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // The following map is populated when opening the region Map maxSeqIdInStores = new TreeMap(Bytes.BYTES_COMPARATOR); + /** Saved state from replaying prepare flush cache */ + private PrepareFlushResult prepareFlushResult = null; + /** * Config setting for whether to allow writes when a region is in recovering or not. 
*/ @@ -400,6 +374,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } return minimumReadPoint; } + /* * Data structure of write state flags used coordinating flushes, * compactions and closes. @@ -451,20 +426,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // * be specified if the flush was successful, and the failure message should only be specified * if it didn't flush. */ - public static class FlushResult { - enum Result { - FLUSHED_NO_COMPACTION_NEEDED, - FLUSHED_COMPACTION_NEEDED, - // Special case where a flush didn't run because there's nothing in the memstores. Used when - // bulk loading to know when we can still load even if a flush didn't happen. - CANNOT_FLUSH_MEMSTORE_EMPTY, - CANNOT_FLUSH - // Be careful adding more to this enum, look at the below methods to make sure - } - + public static class FlushResultImpl implements FlushResult { final Result result; final String failureReason; final long flushSequenceId; + final boolean wroteFlushWalMarker; /** * Convenience constructor to use when the flush is successful, the failure message is set to @@ -473,8 +439,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // * @param flushSequenceId Generated sequence id that comes right after the edits in the * memstores. */ - FlushResult(Result result, long flushSequenceId) { - this(result, flushSequenceId, null); + FlushResultImpl(Result result, long flushSequenceId) { + this(result, flushSequenceId, null, false); assert result == Result.FLUSHED_NO_COMPACTION_NEEDED || result == Result .FLUSHED_COMPACTION_NEEDED; } @@ -484,8 +450,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // * @param result Expecting CANNOT_FLUSH_MEMSTORE_EMPTY or CANNOT_FLUSH. * @param failureReason Reason why we couldn't flush. */ - FlushResult(Result result, String failureReason) { - this(result, -1, failureReason); + FlushResultImpl(Result result, String failureReason, boolean wroteFlushMarker) { + this(result, -1, failureReason, wroteFlushMarker); assert result == Result.CANNOT_FLUSH_MEMSTORE_EMPTY || result == Result.CANNOT_FLUSH; } @@ -495,10 +461,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // * @param flushSequenceId Generated sequence id if the memstores were flushed else -1. * @param failureReason Reason why we couldn't flush, or null. 
*/ - FlushResult(Result result, long flushSequenceId, String failureReason) { + FlushResultImpl(Result result, long flushSequenceId, String failureReason, + boolean wroteFlushMarker) { this.result = result; this.flushSequenceId = flushSequenceId; this.failureReason = failureReason; + this.wroteFlushWalMarker = wroteFlushMarker; } /** @@ -518,6 +486,63 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // public boolean isCompactionNeeded() { return result == Result.FLUSHED_COMPACTION_NEEDED; } + + @Override + public String toString() { + return new StringBuilder() + .append("flush result:").append(result).append(", ") + .append("failureReason:").append(failureReason).append(",") + .append("flush seq id").append(flushSequenceId).toString(); + } + + @Override + public Result getResult() { + return result; + } + } + + /** A result object from prepare flush cache stage */ + @VisibleForTesting + static class PrepareFlushResult { + final FlushResult result; // indicating a failure result from prepare + final TreeMap storeFlushCtxs; + final TreeMap> committedFiles; + final long startTime; + final long flushOpSeqId; + final long flushedSeqId; + final long totalFlushableSize; + + /** Constructs an early exit case */ + PrepareFlushResult(FlushResult result, long flushSeqId) { + this(result, null, null, Math.max(0, flushSeqId), 0, 0, 0); + } + + /** Constructs a successful prepare flush result */ + PrepareFlushResult( + TreeMap storeFlushCtxs, + TreeMap> committedFiles, long startTime, long flushSeqId, + long flushedSeqId, long totalFlushableSize) { + this(null, storeFlushCtxs, committedFiles, startTime, + flushSeqId, flushedSeqId, totalFlushableSize); + } + + private PrepareFlushResult( + FlushResult result, + TreeMap storeFlushCtxs, + TreeMap> committedFiles, long startTime, long flushSeqId, + long flushedSeqId, long totalFlushableSize) { + this.result = result; + this.storeFlushCtxs = storeFlushCtxs; + this.committedFiles = committedFiles; + this.startTime = startTime; + this.flushOpSeqId = flushSeqId; + this.flushedSeqId = flushedSeqId; + this.totalFlushableSize = totalFlushableSize; + } + + public FlushResult getResult() { + return this.result; + } } final WriteState writestate = new WriteState(); @@ -679,7 +704,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // this.metricsRegionWrapper = new MetricsRegionWrapperImpl(this); this.metricsRegion = new MetricsRegion(this.metricsRegionWrapper); - Map recoveringRegions = rsServices.getRecoveringRegions(); + Map recoveringRegions = rsServices.getRecoveringRegions(); String encodedName = getRegionInfo().getEncodedName(); if (recoveringRegions != null && recoveringRegions.containsKey(encodedName)) { this.isRecovering = true; @@ -751,8 +776,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // nextSeqid will be -1 if the initialization fails. // At least it will be 0 otherwise. 
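The concrete FlushResult class is split here into a FlushResult contract plus the FlushResultImpl above, and callers are expected to drive flushes through the new Region#flush(boolean). A minimal caller-side sketch, assuming FlushResult (including isCompactionNeeded()) is exposed through the Region interface this patch introduces and that flush(boolean)/compact(boolean) are the entry points declared later in this file's diff; the helper name is illustrative:

import java.io.IOException;

import org.apache.hadoop.hbase.regionserver.Region;

public final class FlushSketch {
  private FlushSketch() {}

  /** Force-flush every store of the region, then request a compaction if the flush asked for one. */
  static void flushAndMaybeCompact(Region region) throws IOException {
    Region.FlushResult result = region.flush(true);  // true == flush all stores
    if (result.isCompactionNeeded()) {
      region.compact(false);                         // ask each store for a (non-major) compaction
    }
  }
}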
if (nextSeqId == -1) { - status - .abort("Exception during region " + this.getRegionNameAsString() + " initialization."); + status.abort("Exception during region " + getRegionInfo().getRegionNameAsString() + + " initialization."); } } } @@ -768,11 +793,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // status.setStatus("Writing region info on filesystem"); fs.checkRegionInfoOnFilesystem(); - - // Initialize all the HStores status.setStatus("Initializing all the Stores"); - long maxSeqId = initializeRegionStores(reporter, status); + long maxSeqId = initializeRegionStores(reporter, status, false); + this.lastReplayedOpenRegionSeqId = maxSeqId; this.writestate.setReadOnly(ServerRegionReplicaUtil.isReadOnly(this)); this.writestate.flushRequested = false; @@ -834,8 +858,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return nextSeqid; } - private long initializeRegionStores(final CancelableProgressable reporter, MonitoredTask status) + private long initializeRegionStores(final CancelableProgressable reporter, MonitoredTask status, + boolean warmupOnly) throws IOException { + // Load in all the HStores. long maxSeqId = -1; @@ -864,7 +890,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // for (int i = 0; i < htableDescriptor.getFamilies().size(); i++) { Future future = completionService.take(); HStore store = future.get(); - this.stores.put(store.getColumnFamilyName().getBytes(), store); + this.stores.put(store.getFamily().getName(), store); long storeMaxSequenceId = store.getMaxSequenceId(); maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), @@ -897,7 +923,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } } } - if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) { + if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this) && !warmupOnly) { // Recover any edits if available. 
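writeRegionOpenMarker and writeRegionCloseMarker above now iterate getStores() directly instead of walking a byte[]-keyed map. A small sketch of the new shape, assuming getStores() is also the Region-interface accessor for the live Store objects; the helper below is illustrative only:

import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;

public final class StoreIterationSketch {
  private StoreIterationSketch() {}

  /** Print one line per column family: family name and the current store file count. */
  static void printStoreFileCounts(Region region) {
    for (Store store : region.getStores()) {
      System.out.println(Bytes.toString(store.getFamily().getName())
          + ": " + store.getStorefiles().size() + " store file(s)");
    }
  }
}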
maxSeqId = Math.max(maxSeqId, replayRecoveredEditsIfAny( this.fs.getRegionDir(), maxSeqIdInStores, reporter, status)); @@ -907,16 +933,22 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return maxSeqId; } + private void initializeWarmup(final CancelableProgressable reporter) throws IOException { + MonitoredTask status = TaskMonitor.get().createStatus("Initializing region " + this); + + // Initialize all the HStores + status.setStatus("Warming up all the Stores"); + initializeRegionStores(reporter, status, true); + } + private void writeRegionOpenMarker(WAL wal, long openSeqId) throws IOException { - Map> storeFiles - = new TreeMap>(Bytes.BYTES_COMPARATOR); - for (Map.Entry entry : getStores().entrySet()) { - Store store = entry.getValue(); + Map> storeFiles = new TreeMap>(Bytes.BYTES_COMPARATOR); + for (Store store: getStores()) { ArrayList storeFileNames = new ArrayList(); for (StoreFile storeFile: store.getStorefiles()) { storeFileNames.add(storeFile.getPath()); } - storeFiles.put(entry.getKey(), storeFileNames); + storeFiles.put(store.getFamily().getName(), storeFileNames); } RegionEventDescriptor regionOpenDesc = ProtobufUtil.toRegionEventDescriptor( @@ -927,15 +959,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } private void writeRegionCloseMarker(WAL wal) throws IOException { - Map> storeFiles - = new TreeMap>(Bytes.BYTES_COMPARATOR); - for (Map.Entry entry : getStores().entrySet()) { - Store store = entry.getValue(); + Map> storeFiles = new TreeMap>(Bytes.BYTES_COMPARATOR); + for (Store store: getStores()) { ArrayList storeFileNames = new ArrayList(); for (StoreFile storeFile: store.getStorefiles()) { storeFileNames.add(storeFile.getPath()); } - storeFiles.put(entry.getKey(), storeFileNames); + storeFiles.put(store.getFamily().getName(), storeFileNames); } RegionEventDescriptor regionEventDesc = ProtobufUtil.toRegionEventDescriptor( @@ -963,11 +993,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return false; } - /** - * This function will return the HDFS blocks distribution based on the data - * captured when HFile is created - * @return The HDFS blocks distribution for the region. 
- */ + @Override public HDFSBlocksDistribution getHDFSBlocksDistribution() { HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution(); @@ -1024,10 +1050,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return hdfsBlocksDistribution; } - public AtomicLong getMemstoreSize() { - return memstoreSize; - } - /** * Increase the size of mem store in this region and the size of global mem * store @@ -1040,7 +1062,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return this.memstoreSize.addAndGet(memStoreSize); } - /** @return a HRegionInfo object for this region */ + @Override public HRegionInfo getRegionInfo() { return this.fs.getRegionInfo(); } @@ -1053,37 +1075,121 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return this.rsServices; } - /** @return readRequestsCount for this region */ - long getReadRequestsCount() { - return this.readRequestsCount.get(); + @Override + public long getReadRequestsCount() { + return readRequestsCount.get(); } - /** @return writeRequestsCount for this region */ - long getWriteRequestsCount() { - return this.writeRequestsCount.get(); + @Override + public void updateReadRequestsCount(long i) { + readRequestsCount.add(i); } + @Override + public long getWriteRequestsCount() { + return writeRequestsCount.get(); + } + + @Override + public void updateWriteRequestsCount(long i) { + writeRequestsCount.add(i); + } + + @Override + public long getMemstoreSize() { + return memstoreSize.get(); + } + + @Override + public long getNumMutationsWithoutWAL() { + return numMutationsWithoutWAL.get(); + } + + @Override + public long getDataInMemoryWithoutWAL() { + return dataInMemoryWithoutWAL.get(); + } + + @Override + public long getBlockedRequestsCount() { + return blockedRequestsCount.get(); + } + + @Override + public long getCheckAndMutateChecksPassed() { + return checkAndMutateChecksPassed.get(); + } + + @Override + public long getCheckAndMutateChecksFailed() { + return checkAndMutateChecksFailed.get(); + } + + @Override public MetricsRegion getMetrics() { return metricsRegion; } - /** @return true if region is closed */ + @Override public boolean isClosed() { return this.closed.get(); } - /** - * @return True if closing process has started. - */ + @Override public boolean isClosing() { return this.closing.get(); } + @Override + public boolean isReadOnly() { + return this.writestate.isReadOnly(); + } + /** * Reset recovering state of current region */ public void setRecovering(boolean newState) { boolean wasRecovering = this.isRecovering; + // before we flip the recovering switch (enabling reads) we should write the region open + // event to WAL if needed + if (wal != null && getRegionServerServices() != null && !writestate.readOnly + && wasRecovering && !newState) { + + // force a flush only if region replication is set up for this region. Otherwise no need. 
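The comment above is about region replication: the forced flush on recovery only fires when the table was created with more than one region replica. A sketch of opting a table in through the public descriptor API; table and family names are placeholders:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public final class ReplicatedTableSketch {
  private ReplicatedTableSketch() {}

  /** Build a descriptor for a table whose regions get one extra read replica. */
  static HTableDescriptor replicatedTable() {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
    htd.addFamily(new HColumnDescriptor("f"));
    htd.setRegionReplication(2);  // primary plus one secondary replica
    return htd;
  }
}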
+ boolean forceFlush = getTableDesc().getRegionReplication() > 1; + + // force a flush first + MonitoredTask status = TaskMonitor.get().createStatus( + "Flushing region " + this + " because recovery is finished"); + try { + if (forceFlush) { + internalFlushcache(status); + } + + status.setStatus("Writing region open event marker to WAL because recovery is finished"); + try { + long seqId = openSeqNum; + // obtain a new seqId because we possibly have writes and flushes on top of openSeqNum + if (wal != null) { + seqId = getNextSequenceId(wal); + } + writeRegionOpenMarker(wal, seqId); + } catch (IOException e) { + // We cannot rethrow this exception since we are being called from the zk thread. The + // region has already opened. In this case we log the error, but continue + LOG.warn(getRegionInfo().getEncodedName() + " : was not able to write region opening " + + "event to WAL, continueing", e); + } + } catch (IOException ioe) { + // Distributed log replay semantics does not necessarily require a flush, since the replayed + // data is already written again in the WAL. So failed flush should be fine. + LOG.warn(getRegionInfo().getEncodedName() + " : was not able to flush " + + "event to WAL, continueing", ioe); + } finally { + status.cleanup(); + } + } + this.isRecovering = newState; if (wasRecovering && !isRecovering) { // Call only when wal replay is over. @@ -1091,14 +1197,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } } - /** - * @return True if current region is in recovering - */ + @Override public boolean isRecovering() { return this.isRecovering; } - /** @return true if region is available (not closed and not closing) */ + @Override public boolean isAvailable() { return !isClosed() && !isClosing(); } @@ -1113,12 +1217,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // */ public boolean isMergeable() { if (!isAvailable()) { - LOG.debug("Region " + this.getRegionNameAsString() + LOG.debug("Region " + getRegionInfo().getRegionNameAsString() + " is not mergeable because it is closing or closed"); return false; } if (hasReferences()) { - LOG.debug("Region " + this.getRegionNameAsString() + LOG.debug("Region " + getRegionInfo().getRegionNameAsString() + " is not mergeable because it has references"); return false; } @@ -1136,9 +1240,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return mvcc; } - /* - * Returns readpoint considering given IsolationLevel - */ + @Override + public long getMaxFlushedSeqId() { + return maxFlushedSeqId; + } + + @Override public long getReadpoint(IsolationLevel isolationLevel) { if (isolationLevel == IsolationLevel.READ_UNCOMMITTED) { // This scan can read even uncommitted transactions @@ -1147,6 +1254,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return mvcc.memstoreReadPoint(); } + @Override public boolean isLoadingCfsOnDemandDefault() { return this.isLoadingCfsOnDemandDefault; } @@ -1231,9 +1339,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } status.setStatus("Disabling compacts and flushes for region"); + boolean canFlush = true; synchronized (writestate) { // Disable compacting and flushing by background threads for this // region. 
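The accessors promoted to @Override earlier in this hunk (getReadRequestsCount, getMemstoreSize, isReadOnly, and friends) let server-side callers program against the Region interface instead of HRegion. A hedged coprocessor sketch, assuming RegionCoprocessorEnvironment#getRegion() returns that interface after this refactor; the observer class is illustrative:

import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.Region;

public class RegionStatsObserver extends BaseRegionObserver {
  @Override
  public void postOpen(ObserverContext<RegionCoprocessorEnvironment> ctx) {
    Region region = ctx.getEnvironment().getRegion();
    // Every accessor used here is one of the methods made public on the interface in this hunk.
    System.out.println(region.getRegionInfo().getRegionNameAsString()
        + " memstoreSize=" + region.getMemstoreSize()
        + " readRequests=" + region.getReadRequestsCount()
        + " readOnly=" + region.isReadOnly());
  }
}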
+ canFlush = !writestate.readOnly; writestate.writesEnabled = false; LOG.debug("Closing " + this + ": disabling compactions & flushes"); waitForFlushesAndCompactions(); @@ -1241,9 +1351,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // If we were not just flushing, is it worth doing a preflush...one // that will clear out of the bulk of the memstore before we put up // the close flag? - if (!abort && worthPreFlushing()) { + if (!abort && worthPreFlushing() && canFlush) { status.setStatus("Pre-flushing region before close"); - LOG.info("Running close preflush of " + this.getRegionNameAsString()); + LOG.info("Running close preflush of " + getRegionInfo().getRegionNameAsString()); try { internalFlushcache(status); } catch (IOException ioe) { @@ -1264,9 +1374,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } LOG.debug("Updates disabled for region " + this); // Don't flush the cache if we are aborting - if (!abort) { + if (!abort && canFlush) { int flushCount = 0; - while (this.getMemstoreSize().get() > 0) { + while (this.memstoreSize.get() > 0) { try { if (flushCount++ > 0) { int actualFlushes = flushCount - 1; @@ -1274,7 +1384,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // If we tried 5 times and are unable to clear memory, abort // so we do not lose data throw new DroppedSnapshotException("Failed clearing memory after " + - actualFlushes + " attempts on region: " + Bytes.toStringBinary(getRegionName())); + actualFlushes + " attempts on region: " + + Bytes.toStringBinary(getRegionInfo().getRegionName())); } LOG.info("Running extra flush, " + actualFlushes + " (carrying snapshot?) " + this); @@ -1296,13 +1407,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // if (!stores.isEmpty()) { // initialize the thread pool for closing stores in parallel. ThreadPoolExecutor storeCloserThreadPool = - getStoreOpenAndCloseThreadPool("StoreCloserThread-" + this.getRegionNameAsString()); + getStoreOpenAndCloseThreadPool("StoreCloserThread-" + + getRegionInfo().getRegionNameAsString()); CompletionService>> completionService = new ExecutorCompletionService>>(storeCloserThreadPool); // close each store in parallel for (final Store store : stores.values()) { - assert abort || store.getFlushableSize() == 0; + assert abort || store.getFlushableSize() == 0 || writestate.readOnly; completionService .submit(new Callable>>() { @Override @@ -1338,7 +1450,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } this.closed.set(true); - if (memstoreSize.get() != 0) LOG.error("Memstore size is " + memstoreSize.get()); + if (!canFlush) { + addAndGetGlobalMemstoreSize(-memstoreSize.get()); + } else if (memstoreSize.get() != 0) { + LOG.error("Memstore size is " + memstoreSize.get()); + } if (coprocessorHost != null) { status.setStatus("Running coprocessor post-close hooks"); this.coprocessorHost.postClose(abort); @@ -1357,13 +1473,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } } - /** - * Wait for all current flushes and compactions of the region to complete. - *
      - * Exposed for TESTING. - */ + @Override public void waitForFlushesAndCompactions() { synchronized (writestate) { + if (this.writestate.readOnly) { + // we should not wait for replayed flushed if we are read only (for example in case the + // region is a secondary replica). + return; + } boolean interrupted = false; try { while (writestate.compacting > 0 || writestate.flushing) { @@ -1429,32 +1546,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // HRegion accessors ////////////////////////////////////////////////////////////////////////////// - /** @return start key for region */ - public byte [] getStartKey() { - return this.getRegionInfo().getStartKey(); - } - - /** @return end key for region */ - public byte [] getEndKey() { - return this.getRegionInfo().getEndKey(); - } - - /** @return region id */ - public long getRegionId() { - return this.getRegionInfo().getRegionId(); - } - - /** @return region name */ - public byte [] getRegionName() { - return this.getRegionInfo().getRegionName(); - } - - /** @return region name as string for logging */ - public String getRegionNameAsString() { - return this.getRegionInfo().getRegionNameAsString(); - } - - /** @return HTableDescriptor for this region */ + @Override public HTableDescriptor getTableDesc() { return this.htableDescriptor; } @@ -1492,24 +1584,16 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return this.fs; } - /** - * @return Returns the earliest time a store in the region was flushed. All - * other stores in the region would have been flushed either at, or - * after this time. - */ - @VisibleForTesting + @Override public long getEarliestFlushTimeForAllStores() { - return Collections.min(lastStoreFlushTimeMap.values()); + return lastStoreFlushTimeMap.isEmpty() ? Long.MAX_VALUE : Collections.min(lastStoreFlushTimeMap + .values()); } - /** - * This can be used to determine the last time all files of this region were major compacted. - * @param majorCompactioOnly Only consider HFile that are the result of major compaction - * @return the timestamp of the oldest HFile for all stores of this region - */ + @Override public long getOldestHfileTs(boolean majorCompactioOnly) throws IOException { long result = Long.MAX_VALUE; - for (Store store : getStores().values()) { + for (Store store : getStores()) { for (StoreFile file : store.getStorefiles()) { HFile.Reader reader = file.getReader().getHFileReader(); if (majorCompactioOnly) { @@ -1524,6 +1608,23 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return result == Long.MAX_VALUE ? 0 : result; } + RegionLoad.Builder setCompleteSequenceId(RegionLoad.Builder regionLoadBldr) { + long lastFlushOpSeqIdLocal = this.lastFlushOpSeqId; + byte[] encodedRegionName = this.getRegionInfo().getEncodedNameAsBytes(); + regionLoadBldr.clearStoreCompleteSequenceId(); + for (byte[] familyName : this.stores.keySet()) { + long oldestUnflushedSeqId = this.wal.getEarliestMemstoreSeqNum(encodedRegionName, familyName); + // no oldestUnflushedSeqId means no data has written to the store after last flush, so we use + // lastFlushOpSeqId as complete sequence id for the store. + regionLoadBldr.addStoreCompleteSequenceId(StoreSequenceId + .newBuilder() + .setFamilyName(ByteString.copyFrom(familyName)) + .setSequenceId( + oldestUnflushedSeqId < 0 ? 
lastFlushOpSeqIdLocal : oldestUnflushedSeqId - 1).build()); + } + return regionLoadBldr.setCompleteSequenceId(this.maxFlushedSeqId); + } + ////////////////////////////////////////////////////////////////////////////// // HRegion maintenance. // @@ -1557,25 +1658,31 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // protected void doRegionCompactionPrep() throws IOException { } - void triggerMajorCompaction() { - for (Store h : stores.values()) { - h.triggerMajorCompaction(); + @Override + public void triggerMajorCompaction() throws IOException { + for (Store s : getStores()) { + s.triggerMajorCompaction(); } } - /** - * This is a helper function that compact all the stores synchronously - * It is used by utilities and testing - * - * @param majorCompaction True to force a major compaction regardless of thresholds - * @throws IOException e - */ - public void compactStores(final boolean majorCompaction) - throws IOException { + @Override + public void compact(final boolean majorCompaction) throws IOException { if (majorCompaction) { - this.triggerMajorCompaction(); + triggerMajorCompaction(); + } + for (Store s : getStores()) { + CompactionContext compaction = s.requestCompaction(); + if (compaction != null) { + CompactionThroughputController controller = null; + if (rsServices != null) { + controller = CompactionThroughputControllerFactory.create(rsServices, conf); + } + if (controller == null) { + controller = NoLimitCompactionThroughputController.INSTANCE; + } + compact(compaction, s, controller); + } } - compactStores(); } /** @@ -1585,7 +1692,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // * @throws IOException e */ public void compactStores() throws IOException { - for (Store s : getStores().values()) { + for (Store s : getStores()) { CompactionContext compaction = s.requestCompaction(); if (compaction != null) { compact(compaction, s, NoLimitCompactionThroughputController.INSTANCE); @@ -1594,6 +1701,22 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } /** + * This is a helper function that compact the given store + * It is used by utilities and testing + * + * @throws IOException e + */ + @VisibleForTesting + void compactStore(byte[] family, CompactionThroughputController throughputController) + throws IOException { + Store s = getStore(family); + CompactionContext compaction = s.requestCompaction(); + if (compaction != null) { + compact(compaction, s, throughputController); + } + } + + /* * Called by compaction thread and after region is opened to compact the * HStores if necessary. * @@ -1686,16 +1809,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } } - /** - * Flush all stores. - *
      - * See {@link #flushcache(boolean)}. - * - * @return whether the flush is success and whether the region needs compacting - * @throws IOException - */ - public FlushResult flushcache() throws IOException { - return flushcache(true); + @Override + public FlushResult flush(boolean force) throws IOException { + return flushcache(force, false); } /** @@ -1712,18 +1828,20 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // *
      This method may block for some time, so it should not be called from a * time-sensitive thread. * @param forceFlushAllStores whether we want to flush all stores + * @param writeFlushRequestWalMarker whether to write the flush request marker to WAL * @return whether the flush is success and whether the region needs compacting * * @throws IOException general io exceptions * @throws DroppedSnapshotException Thrown when replay of wal is required * because a Snapshot was not properly persisted. */ - public FlushResult flushcache(boolean forceFlushAllStores) throws IOException { + public FlushResult flushcache(boolean forceFlushAllStores, boolean writeFlushRequestWalMarker) + throws IOException { // fail-fast instead of waiting on the lock if (this.closing.get()) { String msg = "Skipping flush on " + this + " because closing"; LOG.debug(msg); - return new FlushResult(FlushResult.Result.CANNOT_FLUSH, msg); + return new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH, msg, false); } MonitoredTask status = TaskMonitor.get().createStatus("Flushing " + this); status.setStatus("Acquiring readlock on region"); @@ -1734,12 +1852,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // String msg = "Skipping flush on " + this + " because closed"; LOG.debug(msg); status.abort(msg); - return new FlushResult(FlushResult.Result.CANNOT_FLUSH, msg); + return new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH, msg, false); } if (coprocessorHost != null) { status.setStatus("Running coprocessor pre-flush hooks"); coprocessorHost.preFlush(); } + // TODO: this should be managed within memstore with the snapshot, updated only after flush + // successful if (numMutationsWithoutWAL.get() > 0) { numMutationsWithoutWAL.set(0); dataInMemoryWithoutWAL.set(0); @@ -1757,14 +1877,15 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // + (writestate.flushing ? "already flushing" : "writes not enabled"); status.abort(msg); - return new FlushResult(FlushResult.Result.CANNOT_FLUSH, msg); + return new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH, msg, false); } } try { Collection specificStoresToFlush = forceFlushAllStores ? stores.values() : flushPolicy.selectStoresToFlush(); - FlushResult fs = internalFlushcache(specificStoresToFlush, status); + FlushResult fs = internalFlushcache(specificStoresToFlush, + status, writeFlushRequestWalMarker); if (coprocessorHost != null) { status.setStatus("Running post-flush coprocessor hooks"); @@ -1845,7 +1966,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } //since we didn't flush in the recent past, flush now if certain conditions //are met. Return true on first such memstore hit. - for (Store s : this.getStores().values()) { + for (Store s : getStores()) { if (s.timeOfOldestEdit() < now - modifiedFlushCheckInterval) { // we have an old enough edit in the memstore, flush return true; @@ -1857,22 +1978,22 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // /** * Flushing all stores. * - * @see #internalFlushcache(Collection, MonitoredTask) + * @see #internalFlushcache(Collection, MonitoredTask, boolean) */ private FlushResult internalFlushcache(MonitoredTask status) throws IOException { - return internalFlushcache(stores.values(), status); + return internalFlushcache(stores.values(), status, false); } /** * Flushing given stores. 
* - * @see #internalFlushcache(WAL, long, Collection, MonitoredTask) + * @see #internalFlushcache(WAL, long, Collection, MonitoredTask, boolean) */ private FlushResult internalFlushcache(final Collection storesToFlush, - MonitoredTask status) throws IOException { + MonitoredTask status, boolean writeFlushWalMarker) throws IOException { return internalFlushcache(this.wal, HConstants.NO_SEQNUM, storesToFlush, - status); + status, writeFlushWalMarker); } /** @@ -1904,7 +2025,22 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // * properly persisted. */ protected FlushResult internalFlushcache(final WAL wal, final long myseqid, - final Collection storesToFlush, MonitoredTask status) throws IOException { + final Collection storesToFlush, MonitoredTask status, boolean writeFlushWalMarker) + throws IOException { + PrepareFlushResult result + = internalPrepareFlushCache(wal, myseqid, storesToFlush, status, writeFlushWalMarker); + if (result.result == null) { + return internalFlushCacheAndCommit(wal, status, result, storesToFlush); + } else { + return result.result; // early exit due to failure from prepare stage + } + } + + protected PrepareFlushResult internalPrepareFlushCache( + final WAL wal, final long myseqid, final Collection storesToFlush, + MonitoredTask status, boolean writeFlushWalMarker) + throws IOException { + if (this.rsServices != null && this.rsServices.isAborted()) { // Don't flush when server aborting, it's unsafe throw new IOException("Aborting flush because server is aborted..."); @@ -1926,16 +2062,19 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // wal can be null replaying edits. if (wal != null) { w = mvcc.beginMemstoreInsert(); - long flushSeqId = getNextSequenceId(wal); - FlushResult flushResult = new FlushResult( - FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, flushSeqId, "Nothing to flush"); - w.setWriteNumber(flushSeqId); + long flushOpSeqId = getNextSequenceId(wal); + FlushResult flushResult = new FlushResultImpl( + FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, flushOpSeqId, "Nothing to flush", + writeFlushRequestMarkerToWAL(wal, writeFlushWalMarker)); + w.setWriteNumber(flushOpSeqId); mvcc.waitForPreviousTransactionsComplete(w); w = null; - return flushResult; + return new PrepareFlushResult(flushResult, myseqid); } else { - return new FlushResult(FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, - "Nothing to flush"); + return new PrepareFlushResult( + new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY, + "Nothing to flush", false), + myseqid); } } } finally { @@ -1979,7 +2118,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // flushedFamilyNames.add(store.getFamily().getName()); } - List storeFlushCtxs = new ArrayList(stores.size()); + TreeMap storeFlushCtxs + = new TreeMap(Bytes.BYTES_COMPARATOR); TreeMap> committedFiles = new TreeMap>( Bytes.BYTES_COMPARATOR); // The sequence id of this flush operation which is used to log FlushMarker and pass to @@ -2000,7 +2140,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // String msg = "Flush will not be started for [" + this.getRegionInfo().getEncodedName() + "] - because the WAL is closing."; status.setStatus(msg); - return new FlushResult(FlushResult.Result.CANNOT_FLUSH, msg); + return new PrepareFlushResult( + new FlushResultImpl(FlushResult.Result.CANNOT_FLUSH, msg, false), + myseqid); } flushOpSeqId = getNextSequenceId(wal); long oldestUnflushedSeqId = 
wal.getEarliestMemstoreSeqNum(encodedRegionName); @@ -2015,12 +2157,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // for (Store s : storesToFlush) { totalFlushableSizeOfFlushableStores += s.getFlushableSize(); - storeFlushCtxs.add(s.createFlushContext(flushOpSeqId)); + storeFlushCtxs.put(s.getFamily().getName(), s.createFlushContext(flushOpSeqId)); committedFiles.put(s.getFamily().getName(), null); // for writing stores to WAL } // write the snapshot start to WAL - if (wal != null) { + if (wal != null && !writestate.readOnly) { FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH, getRegionInfo(), flushOpSeqId, committedFiles); // no sync. Sync is below where we do not hold the updates lock @@ -2029,7 +2171,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } // Prepare flush (take a snapshot) - for (StoreFlushContext flush : storeFlushCtxs) { + for (StoreFlushContext flush : storeFlushCtxs.values()) { flush.prepare(); } } catch (IOException ex) { @@ -2077,15 +2219,54 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // mvcc.waitForPreviousTransactionsComplete(w); // set w to null to prevent mvcc.advanceMemstore from being called again inside finally block w = null; - s = "Flushing stores of " + this; - status.setStatus(s); - if (LOG.isTraceEnabled()) LOG.trace(s); } finally { if (w != null) { // in case of failure just mark current w as complete mvcc.advanceMemstore(w); } } + return new PrepareFlushResult(storeFlushCtxs, committedFiles, startTime, flushOpSeqId, + flushedSeqId, totalFlushableSizeOfFlushableStores); + } + + /** + * Writes a marker to WAL indicating a flush is requested but cannot be complete due to various + * reasons. Ignores exceptions from WAL. Returns whether the write succeeded. + * @param wal + * @return whether WAL write was successful + */ + private boolean writeFlushRequestMarkerToWAL(WAL wal, boolean writeFlushWalMarker) { + if (writeFlushWalMarker && wal != null && !writestate.readOnly) { + FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.CANNOT_FLUSH, + getRegionInfo(), -1, new TreeMap>()); + try { + WALUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), + desc, sequenceId, true); + return true; + } catch (IOException e) { + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Received exception while trying to write the flush request to wal", e); + } + } + return false; + } + + protected FlushResult internalFlushCacheAndCommit( + final WAL wal, MonitoredTask status, final PrepareFlushResult prepareResult, + final Collection storesToFlush) + throws IOException { + + // prepare flush context is carried via PrepareFlushResult + TreeMap storeFlushCtxs = prepareResult.storeFlushCtxs; + TreeMap> committedFiles = prepareResult.committedFiles; + long startTime = prepareResult.startTime; + long flushOpSeqId = prepareResult.flushOpSeqId; + long flushedSeqId = prepareResult.flushedSeqId; + long totalFlushableSizeOfFlushableStores = prepareResult.totalFlushableSize; + + String s = "Flushing stores of " + this; + status.setStatus(s); + if (LOG.isTraceEnabled()) LOG.trace(s); // Any failure from here on out will be catastrophic requiring server // restart so wal content can be replayed and put back into the memstore. @@ -2098,7 +2279,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // just-made new flush store file. The new flushed file is still in the // tmp directory. 
- for (StoreFlushContext flush : storeFlushCtxs) { + for (StoreFlushContext flush : storeFlushCtxs.values()) { flush.flushCache(status); } @@ -2106,7 +2287,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // all the store scanners to reset/reseek). Iterator it = storesToFlush.iterator(); // stores.values() and storeFlushCtxs have same order - for (StoreFlushContext flush : storeFlushCtxs) { + for (StoreFlushContext flush : storeFlushCtxs.values()) { boolean needsCompaction = flush.commit(status); if (needsCompaction) { compactionRequested = true; @@ -2139,14 +2320,15 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // WALUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(), desc, sequenceId, false); } catch (Throwable ex) { - LOG.warn("Received unexpected exception trying to write ABORT_FLUSH marker to WAL:" + - StringUtils.stringifyException(ex)); + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Received unexpected exception trying to write ABORT_FLUSH marker to WAL:" + + StringUtils.stringifyException(ex)); // ignore this since we will be aborting the RS with DSE. } wal.abortCacheFlush(this.getRegionInfo().getEncodedNameAsBytes()); } DroppedSnapshotException dse = new DroppedSnapshotException("region: " + - Bytes.toStringBinary(getRegionName())); + Bytes.toStringBinary(getRegionInfo().getRegionName())); dse.initCause(t); status.abort("Flush failed: " + StringUtils.stringifyException(t)); throw dse; @@ -2165,6 +2347,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // Update the oldest unflushed sequence id for region. this.maxFlushedSeqId = flushedSeqId; + // Record flush operation sequence id. + this.lastFlushOpSeqId = flushOpSeqId; + // C. Finally notify anyone waiting on memstore to clear: // e.g. checkResources(). synchronized (this) { @@ -2183,8 +2368,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // LOG.info(msg); status.setStatus(msg); - return new FlushResult(compactionRequested ? FlushResult.Result.FLUSHED_COMPACTION_NEEDED : - FlushResult.Result.FLUSHED_NO_COMPACTION_NEEDED, flushOpSeqId); + return new FlushResultImpl(compactionRequested ? + FlushResult.Result.FLUSHED_COMPACTION_NEEDED : + FlushResult.Result.FLUSHED_NO_COMPACTION_NEEDED, + flushOpSeqId); } /** @@ -2192,7 +2379,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // * @return Next sequence number unassociated with any actual edit. * @throws IOException */ - private long getNextSequenceId(final WAL wal) throws IOException { + @VisibleForTesting + protected long getNextSequenceId(final WAL wal) throws IOException { WALKey key = this.appendEmptyEdit(wal, null); return key.getSequenceId(); } @@ -2200,32 +2388,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // ////////////////////////////////////////////////////////////////////////////// // get() methods for client use. ////////////////////////////////////////////////////////////////////////////// - /** - * Return all the data for the row that matches row exactly, - * or the one that immediately preceeds it, at or immediately before - * ts. 
- * - * @param row row key - * @return map of values - * @throws IOException - */ - Result getClosestRowBefore(final byte [] row) - throws IOException{ - return getClosestRowBefore(row, HConstants.CATALOG_FAMILY); - } - /** - * Return all the data for the row that matches row exactly, - * or the one that immediately precedes it, at or immediately before - * ts. - * - * @param row row key - * @param family column family to find on - * @return map of values - * @throws IOException read exceptions - */ - public Result getClosestRowBefore(final byte [] row, final byte [] family) - throws IOException { + @Override + public Result getClosestRowBefore(final byte [] row, final byte [] family) throws IOException { if (coprocessorHost != null) { Result result = new Result(); if (coprocessorHost.preGetClosestRowBefore(row, family, result)) { @@ -2256,37 +2421,23 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } } - /** - * Return an iterator that scans over the HRegion, returning the indicated - * columns and rows specified by the {@link Scan}. - *
      - * This Iterator must be closed by the caller. - * - * @param scan configured {@link Scan} - * @return RegionScanner - * @throws IOException read exceptions - */ + @Override public RegionScanner getScanner(Scan scan) throws IOException { return getScanner(scan, null); } - void prepareScanner(Scan scan) { - if(!scan.hasFamilies()) { - // Adding all families to scanner - for(byte[] family: this.htableDescriptor.getFamiliesKeys()){ - scan.addFamily(family); - } - } - } - protected RegionScanner getScanner(Scan scan, List additionalScanners) throws IOException { startRegionOperation(Operation.SCAN); try { // Verify families are all valid - prepareScanner(scan); - if(scan.hasFamilies()) { - for(byte [] family : scan.getFamilyMap().keySet()) { + if (!scan.hasFamilies()) { + // Adding all families to scanner + for (byte[] family: this.htableDescriptor.getFamiliesKeys()) { + scan.addFamily(family); + } + } else { + for (byte [] family : scan.getFamilyMap().keySet()) { checkFamily(family); } } @@ -2307,10 +2458,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return new RegionScannerImpl(scan, additionalScanners, this); } - /* - * @param delete The passed delete is modified by this method. WARNING! - */ - void prepareDelete(Delete delete) throws IOException { + @Override + public void prepareDelete(Delete delete) throws IOException { // Check to see if this is a deleteRow insert if(delete.getFamilyCellMap().isEmpty()){ for(byte [] family : this.htableDescriptor.getFamiliesKeys()){ @@ -2327,15 +2476,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } } - ////////////////////////////////////////////////////////////////////////////// - // set() methods for client use. - ////////////////////////////////////////////////////////////////////////////// - /** - * @param delete delete object - * @throws IOException read exceptions - */ - public void delete(Delete delete) - throws IOException { + @Override + public void delete(Delete delete) throws IOException { checkReadOnly(); checkResources(); startRegionOperation(Operation.DELETE); @@ -2352,6 +2494,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // * Row needed by below method. */ private static final byte [] FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly"); + /** * This is used only by unit tests. Not required to be a public API. * @param familyMap map of family to edits for the given family. @@ -2365,15 +2508,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // doBatchMutate(delete); } - /** - * Setup correct timestamps in the KVs in Delete object. - * Caller should have the row and region locks. - * @param mutation - * @param familyMap - * @param byteNow - * @throws IOException - */ - void prepareDeleteTimestamps(Mutation mutation, Map> familyMap, + @Override + public void prepareDeleteTimestamps(Mutation mutation, Map> familyMap, byte[] byteNow) throws IOException { for (Map.Entry> e : familyMap.entrySet()) { @@ -2433,11 +2569,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // CellUtil.setTimestamp(cell, getCell.getTimestamp()); } - /** - * @throws IOException - */ - public void put(Put put) - throws IOException { + @Override + public void put(Put put) throws IOException { checkReadOnly(); // Do a rough check that we have resources to accept a write. 
The check is @@ -2564,16 +2697,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } } - /** - * Perform a batch of mutations. - * It supports only Put and Delete mutations and will ignore other types passed. - * @param mutations the list of mutations - * @return an array of OperationStatus which internally contains the - * OperationStatusCode and the exceptionMessage if any. - * @throws IOException - */ - public OperationStatus[] batchMutate( - Mutation[] mutations, long nonceGroup, long nonce) throws IOException { + @Override + public OperationStatus[] batchMutate(Mutation[] mutations, long nonceGroup, long nonce) + throws IOException { // As it stands, this is used for 3 things // * batchMutate with single mutation - put/delete, separate or from checkAndMutate. // * coprocessor calls (see ex. BulkDeleteEndpoint). @@ -2585,16 +2711,28 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return batchMutate(mutations, HConstants.NO_NONCE, HConstants.NO_NONCE); } - /** - * Replay a batch of mutations. - * @param mutations mutations to replay. - * @param replaySeqId SeqId for current mutations - * @return an array of OperationStatus which internally contains the - * OperationStatusCode and the exceptionMessage if any. - * @throws IOException - */ + @Override public OperationStatus[] batchReplay(MutationReplay[] mutations, long replaySeqId) throws IOException { + if (!RegionReplicaUtil.isDefaultReplica(getRegionInfo()) + && replaySeqId < lastReplayedOpenRegionSeqId) { + // if it is a secondary replica we should ignore these entries silently + // since they are coming out of order + if (LOG.isTraceEnabled()) { + LOG.trace(getRegionInfo().getEncodedName() + " : " + + "Skipping " + mutations.length + " mutations with replaySeqId=" + replaySeqId + + " which is < than lastReplayedOpenRegionSeqId=" + lastReplayedOpenRegionSeqId); + for (MutationReplay mut : mutations) { + LOG.trace(getRegionInfo().getEncodedName() + " : Skipping : " + mut.mutation); + } + } + + OperationStatus[] statuses = new OperationStatus[mutations.length]; + for (int i = 0; i < statuses.length; i++) { + statuses[i] = OperationStatus.SUCCESS; + } + return statuses; + } return batchMutate(new ReplayBatch(mutations, replaySeqId)); } @@ -2899,7 +3037,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } // txid should always increase, so having the one from the last call is ok. // we use HLogKey here instead of WALKey directly to support legacy coprocessors. - walKey = new HLogKey(this.getRegionInfo().getEncodedNameAsBytes(), + walKey = new ReplayHLogKey(this.getRegionInfo().getEncodedNameAsBytes(), this.htableDescriptor.getTableName(), now, m.getClusterIds(), currentNonceGroup, currentNonce); txid = this.wal.append(this.htableDescriptor, this.getRegionInfo(), walKey, @@ -2925,14 +3063,29 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // STEP 5. Append the final edit to WAL. Do not sync wal. 
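batchMutate(Mutation[], long, long) above becomes an @Override, i.e. part of the Region contract. A minimal sketch of applying a batch without nonces and checking the per-mutation statuses; it assumes the OperationStatusCode values referenced in this hunk live on HConstants, and the helper name is illustrative:

import java.io.IOException;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.regionserver.OperationStatus;
import org.apache.hadoop.hbase.regionserver.Region;

public final class BatchMutateSketch {
  private BatchMutateSketch() {}

  /** Apply a batch without nonces and return how many mutations did not succeed. */
  static int countFailures(Region region, Mutation[] batch) throws IOException {
    OperationStatus[] statuses =
        region.batchMutate(batch, HConstants.NO_NONCE, HConstants.NO_NONCE);
    int failures = 0;
    for (OperationStatus status : statuses) {
      if (status.getOperationStatusCode() != HConstants.OperationStatusCode.SUCCESS) {
        failures++;
      }
    }
    return failures;
  }
}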
// ------------------------- Mutation mutation = batchOp.getMutation(firstIndex); + if (isInReplay) { + // use wal key from the original + walKey = new ReplayHLogKey(this.getRegionInfo().getEncodedNameAsBytes(), + this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, now, + mutation.getClusterIds(), currentNonceGroup, currentNonce); + long replaySeqId = batchOp.getReplaySequenceId(); + walKey.setOrigLogSeqNum(replaySeqId); + + // ensure that the sequence id of the region is at least as big as orig log seq id + while (true) { + long seqId = getSequenceId().get(); + if (seqId >= replaySeqId) break; + if (getSequenceId().compareAndSet(seqId, replaySeqId)) break; + } + } if (walEdit.size() > 0) { + if (!isInReplay) { // we use HLogKey here instead of WALKey directly to support legacy coprocessors. walKey = new HLogKey(this.getRegionInfo().getEncodedNameAsBytes(), this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, now, mutation.getClusterIds(), currentNonceGroup, currentNonce); - if(isInReplay) { - walKey.setOrigLogSeqNum(batchOp.getReplaySequenceId()); } + txid = this.wal.append(this.htableDescriptor, this.getRegionInfo(), walKey, walEdit, getSequenceId(), true, memstoreCells); } @@ -3062,11 +3215,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // //the getting of the lock happens before, so that you would just pass it into //the methods. So in the case of checkAndMutate you could just do lockRow, //get, put, unlockRow or something - /** - * - * @throws IOException - * @return true if the new put was executed, false otherwise - */ + + @Override public boolean checkAndMutate(byte [] row, byte [] family, byte [] qualifier, CompareOp compareOp, ByteArrayComparable comparator, Mutation w, boolean writeToWAL) @@ -3167,15 +3317,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // //the getting of the lock happens before, so that you would just pass it into //the methods. So in the case of checkAndMutate you could just do lockRow, //get, put, unlockRow or something - /** - * - * @throws IOException - * @return true if the new put was executed, false otherwise - */ + + @Override public boolean checkAndRowMutate(byte [] row, byte [] family, byte [] qualifier, CompareOp compareOp, ByteArrayComparable comparator, RowMutations rm, - boolean writeToWAL) - throws IOException{ + boolean writeToWAL) throws IOException { checkReadOnly(); //TODO, add check for value length or maybe even better move this to the //client if this becomes a global setting @@ -3246,10 +3392,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // closeRegionOperation(); } } + private void doBatchMutate(Mutation mutation) throws IOException { // Currently this is only called for puts and deletes, so no nonces. - OperationStatus[] batchMutate = this.batchMutate(new Mutation[] { mutation }, - HConstants.NO_NONCE, HConstants.NO_NONCE); + OperationStatus[] batchMutate = this.batchMutate(new Mutation[] { mutation }); if (batchMutate[0].getOperationStatusCode().equals(OperationStatusCode.SANITY_CHECK_FAILURE)) { throw new FailedSanityCheckException(batchMutate[0].getExceptionMsg()); } else if (batchMutate[0].getOperationStatusCode().equals(OperationStatusCode.BAD_FAMILY)) { @@ -3281,13 +3427,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // The regionserver holding the first region of the table is responsible for taking the // manifest of the mob dir. 
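checkAndMutate and checkAndRowMutate above likewise become Region interface methods. A sketch of a conditional put through the Region-level call, using BinaryComparator and CompareOp from the existing filter package; the helper and its arguments are illustrative:

import java.io.IOException;

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.regionserver.Region;

public final class CheckAndMutateSketch {
  private CheckAndMutateSketch() {}

  /** Put newValue only if the cell currently equals expected; returns true if the put was applied. */
  static boolean putIfEquals(Region region, byte[] row, byte[] family, byte[] qualifier,
      byte[] expected, byte[] newValue) throws IOException {
    Put put = new Put(row);
    put.addColumn(family, qualifier, newValue);
    return region.checkAndMutate(row, family, qualifier, CompareOp.EQUAL,
        new BinaryComparator(expected), put, true /* writeToWAL */);
  }
}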
- if (!Bytes.equals(getStartKey(), HConstants.EMPTY_START_ROW)) + if (!Bytes.equals(getRegionInfo().getStartKey(), HConstants.EMPTY_START_ROW)) return; // if any cf's have is mob enabled, add the "mob region" to the manifest. - Map stores = getStores(); - for (Entry store : stores.entrySet()) { - boolean hasMobStore = store.getValue().getFamily().isMobEnabled(); + List stores = getStores(); + for (Store store : stores) { + boolean hasMobStore = store.getFamily().isMobEnabled(); if (hasMobStore) { // use the .mob as the start key and 0 as the regionid HRegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(this.getTableDesc().getTableName()); @@ -3298,12 +3444,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } } - /** - * Replaces any KV timestamps set to {@link HConstants#LATEST_TIMESTAMP} with the - * provided current timestamp. - * @throws IOException - */ - void updateCellTimestamps(final Iterable> cellItr, final byte[] now) + @Override + public void updateCellTimestamps(final Iterable> cellItr, final byte[] now) throws IOException { for (List cells: cellItr) { if (cells == null) continue; @@ -3395,18 +3537,22 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // * @throws IOException Throws exception if region is in read-only mode. */ protected void checkReadOnly() throws IOException { - if (this.writestate.isReadOnly()) { - throw new IOException("region is read only"); + if (isReadOnly()) { + throw new DoNotRetryIOException("region is read only"); } } protected void checkReadsEnabled() throws IOException { if (!this.writestate.readsEnabled) { - throw new IOException ("The region's reads are disabled. Cannot serve the request"); + throw new IOException(getRegionInfo().getEncodedName() + + ": The region's reads are disabled. Cannot serve the request"); } } public void setReadsEnabled(boolean readsEnabled) { + if (readsEnabled && !this.writestate.readsEnabled) { + LOG.info(getRegionInfo().getEncodedName() + " : Enabling reads for region."); + } this.writestate.setReadsEnabled(readsEnabled); } @@ -3482,12 +3628,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // LOG.debug("rollbackMemstore rolled back " + kvsRolledback); } - /** - * Check the collection of families for validity. - * @throws NoSuchColumnFamilyException if a family does not exist. - */ - void checkFamilies(Collection families) - throws NoSuchColumnFamilyException { + @Override + public void checkFamilies(Collection families) throws NoSuchColumnFamilyException { for (byte[] family : families) { checkFamily(family); } @@ -3517,8 +3659,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } } - void checkTimestamps(final Map> familyMap, - long now) throws FailedSanityCheckException { + @Override + public void checkTimestamps(final Map> familyMap, long now) + throws FailedSanityCheckException { if (timestampSlop == HConstants.LATEST_TIMESTAMP) { return; } @@ -3684,11 +3827,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // The edits size added into rsAccounting during this replaying will not // be required any more. So just clear it. if (this.rsAccounting != null) { - this.rsAccounting.clearRegionReplayEditsSize(this.getRegionName()); + this.rsAccounting.clearRegionReplayEditsSize(getRegionInfo().getRegionName()); } if (seqid > minSeqIdForTheRegion) { // Then we added some edits to memory. Flush and cleanup split edit files. 
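[Illustrative note, not part of the patch] One behavioural change buried in this hunk is that checkReadOnly() now throws DoNotRetryIOException instead of a plain IOException, which tells clients to stop retrying an operation that cannot succeed while the region stays read-only. A standalone sketch of that guard, with the boolean field standing in for writestate.isReadOnly():

import java.io.IOException;
import org.apache.hadoop.hbase.DoNotRetryIOException;

final class ReadOnlyGuard {
  private final boolean readOnly;

  ReadOnlyGuard(boolean readOnly) {
    this.readOnly = readOnly;
  }

  /** Reject mutations outright; DoNotRetryIOException short-circuits client retries. */
  void checkReadOnly() throws IOException {
    if (readOnly) {
      throw new DoNotRetryIOException("region is read only");
    }
  }
}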
- internalFlushcache(null, seqid, stores.values(), status); + internalFlushcache(null, seqid, stores.values(), status, false); } // Now delete the content of recovered edits. We're done w/ them. if (files.size() > 0 && this.conf.getBoolean("hbase.region.archive.recovered.edits", false)) { @@ -3790,7 +3933,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // if (currentEditSeqId > key.getLogSeqNum()) { // when this condition is true, it means we have a serious defect because we need to // maintain increasing SeqId for WAL edits per region - LOG.error("Found decreasing SeqId. PreId=" + currentEditSeqId + " key=" + key + LOG.error(getRegionInfo().getEncodedName() + " : " + + "Found decreasing SeqId. PreId=" + currentEditSeqId + " key=" + key + "; edit=" + val); } else { currentEditSeqId = key.getLogSeqNum(); @@ -3823,7 +3967,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // CompactionDescriptor compaction = WALEdit.getCompaction(cell); if (compaction != null) { //replay the compaction - completeCompactionMarker(compaction); + replayWALCompactionMarker(compaction, false, true, Long.MAX_VALUE); } skippedEdits++; continue; @@ -3854,7 +3998,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // editsCount++; } if (flush) { - internalFlushcache(null, currentEditSeqId, stores.values(), status); + internalFlushcache(null, currentEditSeqId, stores.values(), status, false); } if (coprocessorHost != null) { @@ -3906,15 +4050,780 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // * that was not finished. We could find one recovering a WAL after a regionserver crash. * See HBASE-2331. */ - void completeCompactionMarker(CompactionDescriptor compaction) + void replayWALCompactionMarker(CompactionDescriptor compaction, boolean pickCompactionFiles, + boolean removeFiles, long replaySeqId) throws IOException { - Store store = this.getStore(compaction.getFamilyName().toByteArray()); - if (store == null) { - LOG.warn("Found Compaction WAL edit for deleted family:" + - Bytes.toString(compaction.getFamilyName().toByteArray())); + checkTargetRegion(compaction.getEncodedRegionName().toByteArray(), + "Compaction marker from WAL ", compaction); + + synchronized (writestate) { + if (replaySeqId < lastReplayedOpenRegionSeqId) { + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Skipping replaying compaction event :" + TextFormat.shortDebugString(compaction) + + " because its sequence id " + replaySeqId + " is smaller than this regions " + + "lastReplayedOpenRegionSeqId of " + lastReplayedOpenRegionSeqId); + return; + } + if (replaySeqId < lastReplayedCompactionSeqId) { + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Skipping replaying compaction event :" + TextFormat.shortDebugString(compaction) + + " because its sequence id " + replaySeqId + " is smaller than this regions " + + "lastReplayedCompactionSeqId of " + lastReplayedCompactionSeqId); + return; + } else { + lastReplayedCompactionSeqId = replaySeqId; + } + + if (LOG.isDebugEnabled()) { + LOG.debug(getRegionInfo().getEncodedName() + " : " + + "Replaying compaction marker " + TextFormat.shortDebugString(compaction) + + " with seqId=" + replaySeqId + " and lastReplayedOpenRegionSeqId=" + + lastReplayedOpenRegionSeqId); + } + + startRegionOperation(Operation.REPLAY_EVENT); + try { + Store store = this.getStore(compaction.getFamilyName().toByteArray()); + if (store == null) { + LOG.warn(getRegionInfo().getEncodedName() + " : 
" + + "Found Compaction WAL edit for deleted family:" + + Bytes.toString(compaction.getFamilyName().toByteArray())); + return; + } + store.replayCompactionMarker(compaction, pickCompactionFiles, removeFiles); + logRegionFiles(); + } finally { + closeRegionOperation(Operation.REPLAY_EVENT); + } + } + } + + void replayWALFlushMarker(FlushDescriptor flush, long replaySeqId) throws IOException { + checkTargetRegion(flush.getEncodedRegionName().toByteArray(), + "Flush marker from WAL ", flush); + + if (ServerRegionReplicaUtil.isDefaultReplica(this.getRegionInfo())) { + return; // if primary nothing to do + } + + if (LOG.isDebugEnabled()) { + LOG.debug(getRegionInfo().getEncodedName() + " : " + + "Replaying flush marker " + TextFormat.shortDebugString(flush)); + } + + startRegionOperation(Operation.REPLAY_EVENT); // use region close lock to guard against close + try { + FlushAction action = flush.getAction(); + switch (action) { + case START_FLUSH: + replayWALFlushStartMarker(flush); + break; + case COMMIT_FLUSH: + replayWALFlushCommitMarker(flush); + break; + case ABORT_FLUSH: + replayWALFlushAbortMarker(flush); + break; + case CANNOT_FLUSH: + replayWALFlushCannotFlushMarker(flush, replaySeqId); + break; + default: + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Received a flush event with unknown action, ignoring. " + + TextFormat.shortDebugString(flush)); + break; + } + + logRegionFiles(); + } finally { + closeRegionOperation(Operation.REPLAY_EVENT); + } + } + + /** Replay the flush marker from primary region by creating a corresponding snapshot of + * the store memstores, only if the memstores do not have a higher seqId from an earlier wal + * edit (because the events may be coming out of order). + */ + @VisibleForTesting + PrepareFlushResult replayWALFlushStartMarker(FlushDescriptor flush) throws IOException { + long flushSeqId = flush.getFlushSequenceNumber(); + + HashSet storesToFlush = new HashSet(); + for (StoreFlushDescriptor storeFlush : flush.getStoreFlushesList()) { + byte[] family = storeFlush.getFamilyName().toByteArray(); + Store store = getStore(family); + if (store == null) { + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Received a flush start marker from primary, but the family is not found. Ignoring" + + " StoreFlushDescriptor:" + TextFormat.shortDebugString(storeFlush)); + continue; + } + storesToFlush.add(store); + } + + MonitoredTask status = TaskMonitor.get().createStatus("Preparing flush " + this); + + // we will use writestate as a coarse-grain lock for all the replay events + // (flush, compaction, region open etc) + synchronized (writestate) { + try { + if (flush.getFlushSequenceNumber() < lastReplayedOpenRegionSeqId) { + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Skipping replaying flush event :" + TextFormat.shortDebugString(flush) + + " because its sequence id is smaller than this regions lastReplayedOpenRegionSeqId " + + " of " + lastReplayedOpenRegionSeqId); + return null; + } + if (numMutationsWithoutWAL.get() > 0) { + numMutationsWithoutWAL.set(0); + dataInMemoryWithoutWAL.set(0); + } + + if (!writestate.flushing) { + // we do not have an active snapshot and corresponding this.prepareResult. This means + // we can just snapshot our memstores and continue as normal. + + // invoke prepareFlushCache. 
Send null as wal since we do not want the flush events in wal + PrepareFlushResult prepareResult = internalPrepareFlushCache(null, + flushSeqId, storesToFlush, status, false); + if (prepareResult.result == null) { + // save the PrepareFlushResult so that we can use it later from commit flush + this.writestate.flushing = true; + this.prepareFlushResult = prepareResult; + status.markComplete("Flush prepare successful"); + if (LOG.isDebugEnabled()) { + LOG.debug(getRegionInfo().getEncodedName() + " : " + + " Prepared flush with seqId:" + flush.getFlushSequenceNumber()); + } + } else { + // special case empty memstore. We will still save the flush result in this case, since + // our memstore ie empty, but the primary is still flushing + if (prepareResult.getResult().getResult() == + FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY) { + this.writestate.flushing = true; + this.prepareFlushResult = prepareResult; + if (LOG.isDebugEnabled()) { + LOG.debug(getRegionInfo().getEncodedName() + " : " + + " Prepared empty flush with seqId:" + flush.getFlushSequenceNumber()); + } + } + status.abort("Flush prepare failed with " + prepareResult.result); + // nothing much to do. prepare flush failed because of some reason. + } + return prepareResult; + } else { + // we already have an active snapshot. + if (flush.getFlushSequenceNumber() == this.prepareFlushResult.flushOpSeqId) { + // They define the same flush. Log and continue. + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Received a flush prepare marker with the same seqId: " + + + flush.getFlushSequenceNumber() + " before clearing the previous one with seqId: " + + prepareFlushResult.flushOpSeqId + ". Ignoring"); + // ignore + } else if (flush.getFlushSequenceNumber() < this.prepareFlushResult.flushOpSeqId) { + // We received a flush with a smaller seqNum than what we have prepared. We can only + // ignore this prepare flush request. + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Received a flush prepare marker with a smaller seqId: " + + + flush.getFlushSequenceNumber() + " before clearing the previous one with seqId: " + + prepareFlushResult.flushOpSeqId + ". Ignoring"); + // ignore + } else { + // We received a flush with a larger seqNum than what we have prepared + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Received a flush prepare marker with a larger seqId: " + + + flush.getFlushSequenceNumber() + " before clearing the previous one with seqId: " + + prepareFlushResult.flushOpSeqId + ". Ignoring"); + // We do not have multiple active snapshots in the memstore or a way to merge current + // memstore snapshot with the contents and resnapshot for now. We cannot take + // another snapshot and drop the previous one because that will cause temporary + // data loss in the secondary. So we ignore this for now, deferring the resolution + // to happen when we see the corresponding flush commit marker. If we have a memstore + // snapshot with x, and later received another prepare snapshot with y (where x < y), + // when we see flush commit for y, we will drop snapshot for x, and can also drop all + // the memstore edits if everything in memstore is < y. This is the usual case for + // RS crash + recovery where we might see consequtive prepare flush wal markers. + // Otherwise, this will cause more memory to be used in secondary replica until a + // further prapare + commit flush is seen and replayed. 
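[Illustrative note, not part of the patch] The three cases above (a prepare marker with the same, a smaller, or a larger seqId than the snapshot already held) all end in "ignore", but for different reasons; only the later flush commit marker resolves the larger-seqId case. A condensed sketch of that decision, with an enum and names of my own choosing:

enum PrepareFlushDecision {
  IGNORE_DUPLICATE,   // same flush delivered twice
  IGNORE_STALE,       // older than the snapshot we already prepared
  DEFER_TO_COMMIT     // newer; resolved when the matching flush commit marker arrives
}

final class PrepareFlushReplay {
  static PrepareFlushDecision decide(long incomingSeqId, long preparedSeqId) {
    if (incomingSeqId == preparedSeqId) {
      return PrepareFlushDecision.IGNORE_DUPLICATE;
    }
    if (incomingSeqId < preparedSeqId) {
      return PrepareFlushDecision.IGNORE_STALE;
    }
    return PrepareFlushDecision.DEFER_TO_COMMIT;
  }
}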
+ } + } + } finally { + status.cleanup(); + writestate.notifyAll(); + } + } + return null; + } + + @VisibleForTesting + void replayWALFlushCommitMarker(FlushDescriptor flush) throws IOException { + MonitoredTask status = TaskMonitor.get().createStatus("Committing flush " + this); + + // check whether we have the memstore snapshot with the corresponding seqId. Replay to + // secondary region replicas are in order, except for when the region moves or then the + // region server crashes. In those cases, we may receive replay requests out of order from + // the original seqIds. + synchronized (writestate) { + try { + if (flush.getFlushSequenceNumber() < lastReplayedOpenRegionSeqId) { + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Skipping replaying flush event :" + TextFormat.shortDebugString(flush) + + " because its sequence id is smaller than this regions lastReplayedOpenRegionSeqId " + + " of " + lastReplayedOpenRegionSeqId); + return; + } + + if (writestate.flushing) { + PrepareFlushResult prepareFlushResult = this.prepareFlushResult; + if (flush.getFlushSequenceNumber() == prepareFlushResult.flushOpSeqId) { + if (LOG.isDebugEnabled()) { + LOG.debug(getRegionInfo().getEncodedName() + " : " + + "Received a flush commit marker with seqId:" + flush.getFlushSequenceNumber() + + " and a previous prepared snapshot was found"); + } + // This is the regular case where we received commit flush after prepare flush + // corresponding to the same seqId. + replayFlushInStores(flush, prepareFlushResult, true); + + // Set down the memstore size by amount of flush. + this.addAndGetGlobalMemstoreSize(-prepareFlushResult.totalFlushableSize); + + this.prepareFlushResult = null; + writestate.flushing = false; + } else if (flush.getFlushSequenceNumber() < prepareFlushResult.flushOpSeqId) { + // This should not happen normally. However, lets be safe and guard against these cases + // we received a flush commit with a smaller seqId than what we have prepared + // we will pick the flush file up from this commit (if we have not seen it), but we + // will not drop the memstore + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Received a flush commit marker with smaller seqId: " + + flush.getFlushSequenceNumber() + " than what we have prepared with seqId: " + + prepareFlushResult.flushOpSeqId + ". Picking up new file, but not dropping" + +" prepared memstore snapshot"); + replayFlushInStores(flush, prepareFlushResult, false); + + // snapshot is not dropped, so memstore sizes should not be decremented + // we still have the prepared snapshot, flushing should still be true + } else { + // This should not happen normally. However, lets be safe and guard against these cases + // we received a flush commit with a larger seqId than what we have prepared + // we will pick the flush file for this. We will also obtain the updates lock and + // look for contents of the memstore to see whether we have edits after this seqId. + // If not, we will drop all the memstore edits and the snapshot as well. + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Received a flush commit marker with larger seqId: " + + flush.getFlushSequenceNumber() + " than what we have prepared with seqId: " + + prepareFlushResult.flushOpSeqId + ". Picking up new file and dropping prepared" + +" memstore snapshot"); + + replayFlushInStores(flush, prepareFlushResult, true); + + // Set down the memstore size by amount of flush. 
+ this.addAndGetGlobalMemstoreSize(-prepareFlushResult.totalFlushableSize); + + // Inspect the memstore contents to see whether the memstore contains only edits + // with seqId smaller than the flush seqId. If so, we can discard those edits. + dropMemstoreContentsForSeqId(flush.getFlushSequenceNumber(), null); + + this.prepareFlushResult = null; + writestate.flushing = false; + } + // If we were waiting for observing a flush or region opening event for not showing + // partial data after a secondary region crash, we can allow reads now. We can only make + // sure that we are not showing partial data (for example skipping some previous edits) + // until we observe a full flush start and flush commit. So if we were not able to find + // a previous flush we will not enable reads now. + this.setReadsEnabled(true); + } else { + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Received a flush commit marker with seqId:" + flush.getFlushSequenceNumber() + + ", but no previous prepared snapshot was found"); + // There is no corresponding prepare snapshot from before. + // We will pick up the new flushed file + replayFlushInStores(flush, null, false); + + // Inspect the memstore contents to see whether the memstore contains only edits + // with seqId smaller than the flush seqId. If so, we can discard those edits. + dropMemstoreContentsForSeqId(flush.getFlushSequenceNumber(), null); + } + + status.markComplete("Flush commit successful"); + + // Update the last flushed sequence id for region. + this.maxFlushedSeqId = flush.getFlushSequenceNumber(); + + // advance the mvcc read point so that the new flushed file is visible. + // there may be some in-flight transactions, but they won't be made visible since they are + // either greater than flush seq number or they were already dropped via flush. + // TODO: If we are using FlushAllStoresPolicy, then this can make edits visible from other + // stores while they are still in flight because the flush commit marker will not contain + // flushes from ALL stores. + getMVCC().advanceMemstoreReadPointIfNeeded(flush.getFlushSequenceNumber()); + + // C. Finally notify anyone waiting on memstore to clear: + // e.g. checkResources(). + synchronized (this) { + notifyAll(); // FindBugs NN_NAKED_NOTIFY + } + } finally { + status.cleanup(); + writestate.notifyAll(); + } + } + } + + /** + * Replays the given flush descriptor by opening the flush files in stores and dropping the + * memstore snapshots if requested. + * @param flush + * @param prepareFlushResult + * @param dropMemstoreSnapshot + * @throws IOException + */ + private void replayFlushInStores(FlushDescriptor flush, PrepareFlushResult prepareFlushResult, + boolean dropMemstoreSnapshot) + throws IOException { + for (StoreFlushDescriptor storeFlush : flush.getStoreFlushesList()) { + byte[] family = storeFlush.getFamilyName().toByteArray(); + Store store = getStore(family); + if (store == null) { + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Received a flush commit marker from primary, but the family is not found." 
+ + "Ignoring StoreFlushDescriptor:" + storeFlush); + continue; + } + List flushFiles = storeFlush.getFlushOutputList(); + StoreFlushContext ctx = null; + long startTime = EnvironmentEdgeManager.currentTime(); + if (prepareFlushResult == null || prepareFlushResult.storeFlushCtxs == null) { + ctx = store.createFlushContext(flush.getFlushSequenceNumber()); + } else { + ctx = prepareFlushResult.storeFlushCtxs.get(family); + startTime = prepareFlushResult.startTime; + } + + if (ctx == null) { + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Unexpected: flush commit marker received from store " + + Bytes.toString(family) + " but no associated flush context. Ignoring"); + continue; + } + ctx.replayFlush(flushFiles, dropMemstoreSnapshot); // replay the flush + + // Record latest flush time + this.lastStoreFlushTimeMap.put(store, startTime); + } + } + + /** + * Drops the memstore contents after replaying a flush descriptor or region open event replay + * if the memstore edits have seqNums smaller than the given seq id + * @param flush the flush descriptor + * @throws IOException + */ + private long dropMemstoreContentsForSeqId(long seqId, Store store) throws IOException { + long totalFreedSize = 0; + this.updatesLock.writeLock().lock(); + try { + mvcc.waitForPreviousTransactionsComplete(); + long currentSeqId = getSequenceId().get(); + if (seqId >= currentSeqId) { + // then we can drop the memstore contents since everything is below this seqId + LOG.info(getRegionInfo().getEncodedName() + " : " + + "Dropping memstore contents as well since replayed flush seqId: " + + seqId + " is greater than current seqId:" + currentSeqId); + + // Prepare flush (take a snapshot) and then abort (drop the snapshot) + if (store == null ) { + for (Store s : stores.values()) { + totalFreedSize += doDropStoreMemstoreContentsForSeqId(s, currentSeqId); + } + } else { + totalFreedSize += doDropStoreMemstoreContentsForSeqId(store, currentSeqId); + } + } else { + LOG.info(getRegionInfo().getEncodedName() + " : " + + "Not dropping memstore contents since replayed flush seqId: " + + seqId + " is smaller than current seqId:" + currentSeqId); + } + } finally { + this.updatesLock.writeLock().unlock(); + } + return totalFreedSize; + } + + private long doDropStoreMemstoreContentsForSeqId(Store s, long currentSeqId) throws IOException { + long snapshotSize = s.getFlushableSize(); + this.addAndGetGlobalMemstoreSize(-snapshotSize); + StoreFlushContext ctx = s.createFlushContext(currentSeqId); + ctx.prepare(); + ctx.abort(); + return snapshotSize; + } + + private void replayWALFlushAbortMarker(FlushDescriptor flush) { + // nothing to do for now. A flush abort will cause a RS abort which means that the region + // will be opened somewhere else later. We will see the region open event soon, and replaying + // that will drop the snapshot + } + + private void replayWALFlushCannotFlushMarker(FlushDescriptor flush, long replaySeqId) { + synchronized (writestate) { + if (this.lastReplayedOpenRegionSeqId > replaySeqId) { + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Skipping replaying flush event :" + TextFormat.shortDebugString(flush) + + " because its sequence id " + replaySeqId + " is smaller than this regions " + + "lastReplayedOpenRegionSeqId of " + lastReplayedOpenRegionSeqId); + return; + } + + // If we were waiting for observing a flush or region opening event for not showing partial + // data after a secondary region crash, we can allow reads now. 
This event means that the + // primary was not able to flush because memstore is empty when we requested flush. By the + // time we observe this, we are guaranteed to have up to date seqId with our previous + // assignment. + this.setReadsEnabled(true); + } + } + + @VisibleForTesting + PrepareFlushResult getPrepareFlushResult() { + return prepareFlushResult; + } + + void replayWALRegionEventMarker(RegionEventDescriptor regionEvent) throws IOException { + checkTargetRegion(regionEvent.getEncodedRegionName().toByteArray(), + "RegionEvent marker from WAL ", regionEvent); + + startRegionOperation(Operation.REPLAY_EVENT); + try { + if (ServerRegionReplicaUtil.isDefaultReplica(this.getRegionInfo())) { + return; // if primary nothing to do + } + + if (regionEvent.getEventType() == EventType.REGION_CLOSE) { + // nothing to do on REGION_CLOSE for now. + return; + } + if (regionEvent.getEventType() != EventType.REGION_OPEN) { + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Unknown region event received, ignoring :" + + TextFormat.shortDebugString(regionEvent)); + return; + } + + if (LOG.isDebugEnabled()) { + LOG.debug(getRegionInfo().getEncodedName() + " : " + + "Replaying region open event marker " + TextFormat.shortDebugString(regionEvent)); + } + + // we will use writestate as a coarse-grain lock for all the replay events + synchronized (writestate) { + // Replication can deliver events out of order when primary region moves or the region + // server crashes, since there is no coordination between replication of different wal files + // belonging to different region servers. We have to safe guard against this case by using + // region open event's seqid. Since this is the first event that the region puts (after + // possibly flushing recovered.edits), after seeing this event, we can ignore every edit + // smaller than this seqId + if (this.lastReplayedOpenRegionSeqId <= regionEvent.getLogSequenceNumber()) { + this.lastReplayedOpenRegionSeqId = regionEvent.getLogSequenceNumber(); + } else { + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Skipping replaying region event :" + TextFormat.shortDebugString(regionEvent) + + " because its sequence id is smaller than this regions lastReplayedOpenRegionSeqId " + + " of " + lastReplayedOpenRegionSeqId); + return; + } + + // region open lists all the files that the region has at the time of the opening. Just pick + // all the files and drop prepared flushes and empty memstores + for (StoreDescriptor storeDescriptor : regionEvent.getStoresList()) { + // stores of primary may be different now + byte[] family = storeDescriptor.getFamilyName().toByteArray(); + Store store = getStore(family); + if (store == null) { + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Received a region open marker from primary, but the family is not found. " + + "Ignoring. StoreDescriptor:" + storeDescriptor); + continue; + } + + long storeSeqId = store.getMaxSequenceId(); + List storeFiles = storeDescriptor.getStoreFileList(); + store.refreshStoreFiles(storeFiles); // replace the files with the new ones + if (store.getMaxSequenceId() != storeSeqId) { + // Record latest flush time if we picked up new files + lastStoreFlushTimeMap.put(store, EnvironmentEdgeManager.currentTime()); + } + + if (writestate.flushing) { + // only drop memstore snapshots if they are smaller than last flush for the store + if (this.prepareFlushResult.flushOpSeqId <= regionEvent.getLogSequenceNumber()) { + StoreFlushContext ctx = this.prepareFlushResult.storeFlushCtxs == null ? 
+ null : this.prepareFlushResult.storeFlushCtxs.get(family); + if (ctx != null) { + long snapshotSize = store.getFlushableSize(); + ctx.abort(); + this.addAndGetGlobalMemstoreSize(-snapshotSize); + this.prepareFlushResult.storeFlushCtxs.remove(family); + } + } + } + + // Drop the memstore contents if they are now smaller than the latest seen flushed file + dropMemstoreContentsForSeqId(regionEvent.getLogSequenceNumber(), store); + if (storeSeqId > this.maxFlushedSeqId) { + this.maxFlushedSeqId = storeSeqId; + } + } + + // if all stores ended up dropping their snapshots, we can safely drop the + // prepareFlushResult + dropPrepareFlushIfPossible(); + + // advance the mvcc read point so that the new flushed file is visible. + // there may be some in-flight transactions, but they won't be made visible since they are + // either greater than flush seq number or they were already dropped via flush. + getMVCC().advanceMemstoreReadPointIfNeeded(this.maxFlushedSeqId); + + // If we were waiting for observing a flush or region opening event for not showing partial + // data after a secondary region crash, we can allow reads now. + this.setReadsEnabled(true); + + // C. Finally notify anyone waiting on memstore to clear: + // e.g. checkResources(). + synchronized (this) { + notifyAll(); // FindBugs NN_NAKED_NOTIFY + } + } + logRegionFiles(); + } finally { + closeRegionOperation(Operation.REPLAY_EVENT); + } + } + + void replayWALBulkLoadEventMarker(WALProtos.BulkLoadDescriptor bulkLoadEvent) throws IOException { + checkTargetRegion(bulkLoadEvent.getEncodedRegionName().toByteArray(), + "BulkLoad marker from WAL ", bulkLoadEvent); + + if (ServerRegionReplicaUtil.isDefaultReplica(this.getRegionInfo())) { + return; // if primary nothing to do + } + + if (LOG.isDebugEnabled()) { + LOG.debug(getRegionInfo().getEncodedName() + " : " + + "Replaying bulkload event marker " + TextFormat.shortDebugString(bulkLoadEvent)); + } + // check if multiple families involved + boolean multipleFamilies = false; + byte[] family = null; + for (StoreDescriptor storeDescriptor : bulkLoadEvent.getStoresList()) { + byte[] fam = storeDescriptor.getFamilyName().toByteArray(); + if (family == null) { + family = fam; + } else if (!Bytes.equals(family, fam)) { + multipleFamilies = true; + break; + } + } + + startBulkRegionOperation(multipleFamilies); + try { + // we will use writestate as a coarse-grain lock for all the replay events + synchronized (writestate) { + // Replication can deliver events out of order when primary region moves or the region + // server crashes, since there is no coordination between replication of different wal files + // belonging to different region servers. We have to safe guard against this case by using + // region open event's seqid. 
Since this is the first event that the region puts (after + // possibly flushing recovered.edits), after seeing this event, we can ignore every edit + // smaller than this seqId + if (bulkLoadEvent.getBulkloadSeqNum() >= 0 + && this.lastReplayedOpenRegionSeqId >= bulkLoadEvent.getBulkloadSeqNum()) { + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Skipping replaying bulkload event :" + + TextFormat.shortDebugString(bulkLoadEvent) + + " because its sequence id is smaller than this region's lastReplayedOpenRegionSeqId" + + " =" + lastReplayedOpenRegionSeqId); + + return; + } + + for (StoreDescriptor storeDescriptor : bulkLoadEvent.getStoresList()) { + // stores of primary may be different now + family = storeDescriptor.getFamilyName().toByteArray(); + Store store = getStore(family); + if (store == null) { + LOG.warn(getRegionInfo().getEncodedName() + " : " + + "Received a bulk load marker from primary, but the family is not found. " + + "Ignoring. StoreDescriptor:" + storeDescriptor); + continue; + } + + List storeFiles = storeDescriptor.getStoreFileList(); + for (String storeFile : storeFiles) { + StoreFileInfo storeFileInfo = null; + try { + storeFileInfo = fs.getStoreFileInfo(Bytes.toString(family), storeFile); + store.bulkLoadHFile(storeFileInfo); + } catch(FileNotFoundException ex) { + LOG.warn(getRegionInfo().getEncodedName() + " : " + + ((storeFileInfo != null) ? storeFileInfo.toString() : + (new Path(Bytes.toString(family), storeFile)).toString()) + + " doesn't exist any more. Skip loading the file"); + } + } + } + } + if (bulkLoadEvent.getBulkloadSeqNum() > 0) { + getMVCC().advanceMemstoreReadPointIfNeeded(bulkLoadEvent.getBulkloadSeqNum()); + } + } finally { + closeBulkRegionOperation(); + } + } + + /** + * If all stores ended up dropping their snapshots, we can safely drop the prepareFlushResult + */ + private void dropPrepareFlushIfPossible() { + if (writestate.flushing) { + boolean canDrop = true; + if (prepareFlushResult.storeFlushCtxs != null) { + for (Entry entry + : prepareFlushResult.storeFlushCtxs.entrySet()) { + Store store = getStore(entry.getKey()); + if (store == null) { + continue; + } + if (store.getSnapshotSize() > 0) { + canDrop = false; + break; + } + } + } + + // this means that all the stores in the region has finished flushing, but the WAL marker + // may not have been written or we did not receive it yet. + if (canDrop) { + writestate.flushing = false; + this.prepareFlushResult = null; + } + } + } + + @Override + public boolean refreshStoreFiles() throws IOException { + if (ServerRegionReplicaUtil.isDefaultReplica(this.getRegionInfo())) { + return false; // if primary nothing to do + } + + if (LOG.isDebugEnabled()) { + LOG.debug(getRegionInfo().getEncodedName() + " : " + + "Refreshing store files to see whether we can free up memstore"); + } + + long totalFreedSize = 0; + + long smallestSeqIdInStores = Long.MAX_VALUE; + + startRegionOperation(); // obtain region close lock + try { + synchronized (writestate) { + for (Store store : getStores()) { + // TODO: some stores might see new data from flush, while others do not which + // MIGHT break atomic edits across column families. + long maxSeqIdBefore = store.getMaxSequenceId(); + + // refresh the store files. This is similar to observing a region open wal marker. 
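[Illustrative note, not part of the patch] dropPrepareFlushIfPossible() above only clears the pending prepare once every store's snapshot has drained to zero. A standalone sketch of that all-or-nothing check, with a plain list of snapshot sizes standing in for the per-store lookups:

import java.util.List;

final class PrepareFlushCleanup {
  /** @return true only if no store still holds snapshot data, i.e. the prepare can be dropped. */
  static boolean canDropPreparedFlush(List<Long> storeSnapshotSizes) {
    for (long size : storeSnapshotSizes) {
      if (size > 0) {
        return false;   // at least one store still has an undrained snapshot
      }
    }
    return true;
  }
}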
+ store.refreshStoreFiles(); + + long storeSeqId = store.getMaxSequenceId(); + if (storeSeqId < smallestSeqIdInStores) { + smallestSeqIdInStores = storeSeqId; + } + + // see whether we can drop the memstore or the snapshot + if (storeSeqId > maxSeqIdBefore) { + + if (writestate.flushing) { + // only drop memstore snapshots if they are smaller than last flush for the store + if (this.prepareFlushResult.flushOpSeqId <= storeSeqId) { + StoreFlushContext ctx = this.prepareFlushResult.storeFlushCtxs == null ? + null : this.prepareFlushResult.storeFlushCtxs.get(store.getFamily().getName()); + if (ctx != null) { + long snapshotSize = store.getFlushableSize(); + ctx.abort(); + this.addAndGetGlobalMemstoreSize(-snapshotSize); + this.prepareFlushResult.storeFlushCtxs.remove(store.getFamily().getName()); + totalFreedSize += snapshotSize; + } + } + } + + // Drop the memstore contents if they are now smaller than the latest seen flushed file + totalFreedSize += dropMemstoreContentsForSeqId(storeSeqId, store); + } + } + + // if all stores ended up dropping their snapshots, we can safely drop the + // prepareFlushResult + dropPrepareFlushIfPossible(); + + // advance the mvcc read point so that the new flushed files are visible. + // there may be some in-flight transactions, but they won't be made visible since they are + // either greater than flush seq number or they were already picked up via flush. + for (Store s : getStores()) { + getMVCC().advanceMemstoreReadPointIfNeeded(s.getMaxMemstoreTS()); + } + + // smallestSeqIdInStores is the seqId that we have a corresponding hfile for. We can safely + // skip all edits that are to be replayed in the future with that has a smaller seqId + // than this. We are updating lastReplayedOpenRegionSeqId so that we can skip all edits + // that we have picked the flush files for + if (this.lastReplayedOpenRegionSeqId < smallestSeqIdInStores) { + this.lastReplayedOpenRegionSeqId = smallestSeqIdInStores; + } + } + // C. Finally notify anyone waiting on memstore to clear: + // e.g. checkResources(). + synchronized (this) { + notifyAll(); // FindBugs NN_NAKED_NOTIFY + } + return totalFreedSize > 0; + } finally { + closeRegionOperation(); + } + } + + private void logRegionFiles() { + if (LOG.isTraceEnabled()) { + LOG.trace(getRegionInfo().getEncodedName() + " : Store files for region: "); + for (Store s : stores.values()) { + for (StoreFile sf : s.getStorefiles()) { + LOG.trace(getRegionInfo().getEncodedName() + " : " + sf); + } + } + } + } + + /** Checks whether the given regionName is either equal to our region, or that + * the regionName is the primary region to our corresponding range for the secondary replica. 
+ */ + private void checkTargetRegion(byte[] encodedRegionName, String exceptionMsg, Object payload) + throws WrongRegionException { + if (Bytes.equals(this.getRegionInfo().getEncodedNameAsBytes(), encodedRegionName)) { return; } - store.completeCompactionMarker(compaction); + + if (!RegionReplicaUtil.isDefaultReplica(this.getRegionInfo()) && + Bytes.equals(encodedRegionName, + this.fs.getRegionInfoForFS().getEncodedNameAsBytes())) { + return; + } + + throw new WrongRegionException(exceptionMsg + payload + + " targetted for region " + Bytes.toStringBinary(encodedRegionName) + + " does not match this region: " + this.getRegionInfo()); } /** @@ -3926,7 +4835,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // protected boolean restoreEdit(final Store s, final Cell cell) { long kvSize = s.add(cell).getFirst(); if (this.rsAccounting != null) { - rsAccounting.addAndGetRegionReplayEditsSize(this.getRegionName(), kvSize); + rsAccounting.addAndGetRegionReplayEditsSize(getRegionInfo().getRegionName(), kvSize); } return isFlushSize(this.addAndGetGlobalMemstoreSize(kvSize)); } @@ -3959,13 +4868,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return new HStore(this, family, this.conf); } - /** - * Return HStore instance. - * Use with caution. Exposed for use of fixup utilities. - * @param column Name of column family hosted by this region. - * @return Store that goes with the family on passed column. - * TODO: Make this lookup faster. - */ + @Override public Store getStore(final byte[] column) { return this.stores.get(column); } @@ -3986,17 +4889,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return null; } - public Map getStores() { - return this.stores; + @Override + public List getStores() { + List list = new ArrayList(stores.size()); + list.addAll(stores.values()); + return list; } - /** - * Return list of storeFiles for the set of CFs. - * Uses closeLock to prevent the race condition where a region closes - * in between the for loop - closing the stores one by one, some stores - * will return 0 files. - * @return List of storeFiles. - */ + @Override public List getStoreFileList(final byte [][] columns) throws IllegalArgumentException { List storeFileNames = new ArrayList(); @@ -4010,6 +4910,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // for (StoreFile storeFile: store.getStorefiles()) { storeFileNames.add(storeFile.getPath().toString()); } + + logRegionFiles(); } } return storeFileNames; @@ -4024,21 +4926,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // if (!rowIsInRange(getRegionInfo(), row)) { throw new WrongRegionException("Requested row out of range for " + op + " on HRegion " + this + ", startKey='" + - Bytes.toStringBinary(getStartKey()) + "', getEndKey()='" + - Bytes.toStringBinary(getEndKey()) + "', row='" + + Bytes.toStringBinary(getRegionInfo().getStartKey()) + "', getEndKey()='" + + Bytes.toStringBinary(getRegionInfo().getEndKey()) + "', row='" + Bytes.toStringBinary(row) + "'"); } } - /** - * Tries to acquire a lock on the given row. - * @param waitForLock if true, will block until the lock is available. - * Otherwise, just tries to obtain the lock and returns - * false if unavailable. 
- * @return the row lock if acquired, - * null if waitForLock was false and the lock was not acquired - * @throws IOException if waitForLock was true and the lock could not be acquired after waiting - */ + @Override public RowLock getRowLock(byte[] row, boolean waitForLock) throws IOException { startRegionOperation(); try { @@ -4071,16 +4965,27 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // if (!waitForLock) { return null; } + TraceScope traceScope = null; try { + if (Trace.isTracing()) { + traceScope = Trace.startSpan("HRegion.getRowLockInternal"); + } // Row is already locked by some other thread, give up or wait for it if (!existingContext.latch.await(this.rowLockWaitDuration, TimeUnit.MILLISECONDS)) { + if(traceScope != null) { + traceScope.getSpan().addTimelineAnnotation("Failed to get row lock"); + } throw new IOException("Timed out waiting for lock for row: " + rowKey); } + if (traceScope != null) traceScope.close(); + traceScope = null; } catch (InterruptedException ie) { LOG.warn("Thread interrupted waiting for lock on row: " + rowKey); InterruptedIOException iie = new InterruptedIOException(); iie.initCause(ie); throw iie; + } finally { + if (traceScope != null) traceScope.close(); } } } @@ -4099,9 +5004,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return getRowLock(row, true); } - /** - * If the given list of row locks is not null, releases all locks. - */ + @Override public void releaseRowLocks(List rowLocks) { if (rowLocks != null) { for (RowLock rowLock : rowLocks) { @@ -4117,8 +5020,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // * * @param familyPaths List of Pair */ - private static boolean hasMultipleColumnFamilies( - List> familyPaths) { + private static boolean hasMultipleColumnFamilies(Collection> familyPaths) { boolean multipleFamilies = false; byte[] family = null; for (Pair pair : familyPaths) { @@ -4133,36 +5035,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return multipleFamilies; } - /** - * Bulk load a/many HFiles into this region - * - * @param familyPaths A list which maps column families to the location of the HFile to load - * into that column family region. - * @param assignSeqId Force a flush, get it's sequenceId to preserve the guarantee that all the - * edits lower than the highest sequential ID from all the HFiles are flushed - * on disk. - * @return true if successful, false if failed recoverably - * @throws IOException if failed unrecoverably. - */ - public boolean bulkLoadHFiles(List> familyPaths, - boolean assignSeqId) throws IOException { - return bulkLoadHFiles(familyPaths, assignSeqId, null); - } - - /** - * Attempts to atomically load a group of hfiles. This is critical for loading - * rows with multiple column families atomically. - * - * @param familyPaths List of Pair - * @param bulkLoadListener Internal hooks enabling massaging/preparation of a - * file about to be bulk loaded - * @param assignSeqId Force a flush, get it's sequenceId to preserve the guarantee that - * all the edits lower than the highest sequential ID from all the - * HFiles are flushed on disk. - * @return true if successful, false if failed recoverably - * @throws IOException if failed unrecoverably. 
- */ - public boolean bulkLoadHFiles(List> familyPaths, boolean assignSeqId, + @Override + public boolean bulkLoadHFiles(Collection> familyPaths, boolean assignSeqId, BulkLoadListener bulkLoadListener) throws IOException { long seqId = -1; Map> storeFiles = new TreeMap>(Bytes.BYTES_COMPARATOR); @@ -4225,14 +5099,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // guaranteed to be one beyond the file made when we flushed (or if nothing to flush, it is // a sequence id that we can be sure is beyond the last hfile written). if (assignSeqId) { - FlushResult fs = this.flushcache(true); + FlushResult fs = flushcache(true, false); if (fs.isFlushSucceeded()) { - seqId = fs.flushSequenceId; - } else if (fs.result == FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY) { - seqId = fs.flushSequenceId; + seqId = ((FlushResultImpl)fs).flushSequenceId; + } else if (fs.getResult() == FlushResult.Result.CANNOT_FLUSH_MEMSTORE_EMPTY) { + seqId = ((FlushResultImpl)fs).flushSequenceId; } else { - throw new IOException("Could not bulk load with an assigned sequential ID because the " + - "flush didn't run. Reason for not flushing: " + fs.failureReason); + throw new IOException("Could not bulk load with an assigned sequential ID because the "+ + "flush didn't run. Reason for not flushing: " + ((FlushResultImpl)fs).failureReason); } } @@ -4245,13 +5119,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // if (bulkLoadListener != null) { finalPath = bulkLoadListener.prepareBulkLoad(familyName, path); } - store.bulkLoadHFile(finalPath, seqId); - + Path commitedStoreFile = store.bulkLoadHFile(finalPath, seqId); + if(storeFiles.containsKey(familyName)) { - storeFiles.get(familyName).add(new Path(finalPath)); + storeFiles.get(familyName).add(commitedStoreFile); } else { List storeFileNames = new ArrayList(); - storeFileNames.add(new Path(finalPath)); + storeFileNames.add(commitedStoreFile); storeFiles.put(familyName, storeFileNames); } if (bulkLoadListener != null) { @@ -4294,25 +5168,25 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } } } - + closeBulkRegionOperation(); } } @Override public boolean equals(Object o) { - return o instanceof HRegion && Bytes.equals(this.getRegionName(), - ((HRegion) o).getRegionName()); + return o instanceof HRegion && Bytes.equals(getRegionInfo().getRegionName(), + ((HRegion) o).getRegionInfo().getRegionName()); } @Override public int hashCode() { - return Bytes.hashCode(this.getRegionName()); + return Bytes.hashCode(getRegionInfo().getRegionName()); } @Override public String toString() { - return this.getRegionNameAsString(); + return getRegionInfo().getRegionNameAsString(); } /** @@ -4328,11 +5202,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // * If the joined heap data gathering is interrupted due to scan limits, this will * contain the row for which we are populating the values.*/ protected Cell joinedContinuationRow = null; - // KeyValue indicating that limit is reached when scanning - private final KeyValue KV_LIMIT = new KeyValue(); protected final byte[] stopRow; private final FilterWrapper filter; - private int batch; + private ScannerContext defaultScannerContext; protected int isScan; private boolean filterClosed = false; private long readPt; @@ -4355,7 +5227,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // this.filter = null; } - this.batch = scan.getBatch(); + /** + * By default, calls to next/nextRaw 
must enforce the batch limit. Thus, construct a default + * scanner context that can be used to enforce the batch limit in the event that a + * ScannerContext is not specified during an invocation of next/nextRaw + */ + defaultScannerContext = ScannerContext.newBuilder().setBatchLimit(scan.getBatch()).build(); + if (Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW) && !scan.isGetScan()) { this.stopRow = null; } else { @@ -4414,6 +5292,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return this.readPt; } + @Override + public int getBatch() { + return this.defaultScannerContext.getBatchLimit(); + } + /** * Reset both the filter and the old filter. * @@ -4429,11 +5312,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // public boolean next(List outResults) throws IOException { // apply the batching limit by default - return next(outResults, batch); + return next(outResults, defaultScannerContext); } @Override - public synchronized boolean next(List outResults, int limit) throws IOException { + public synchronized boolean next(List outResults, ScannerContext scannerContext) + throws IOException { if (this.filterClosed) { throw new UnknownScannerException("Scanner was closed (timed out?) " + "after we renewed it. Could be caused by a very slow scanner " + @@ -4442,77 +5326,122 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // startRegionOperation(Operation.SCAN); readRequestsCount.increment(); try { - return nextRaw(outResults, limit); + return nextRaw(outResults, scannerContext); } finally { closeRegionOperation(Operation.SCAN); } } @Override - public boolean nextRaw(List outResults) - throws IOException { - return nextRaw(outResults, batch); + public boolean nextRaw(List outResults) throws IOException { + // Use the RegionScanner's context by default + return nextRaw(outResults, defaultScannerContext); } @Override - public boolean nextRaw(List outResults, int limit) throws IOException { + public boolean nextRaw(List outResults, ScannerContext scannerContext) + throws IOException { if (storeHeap == null) { // scanner is closed throw new UnknownScannerException("Scanner was closed"); } - boolean returnResult; + boolean moreValues; if (outResults.isEmpty()) { // Usually outResults is empty. This is true when next is called // to handle scan or get operation. - returnResult = nextInternal(outResults, limit); + moreValues = nextInternal(outResults, scannerContext); } else { List tmpList = new ArrayList(); - returnResult = nextInternal(tmpList, limit); + moreValues = nextInternal(tmpList, scannerContext); outResults.addAll(tmpList); } - resetFilters(); + + // If the size limit was reached it means a partial Result is being returned. 
Returning a + // partial Result means that we should not reset the filters; filters should only be reset in + // between rows + if (!scannerContext.partialResultFormed()) resetFilters(); + if (isFilterDoneInternal()) { - returnResult = false; + moreValues = false; } - return returnResult; + return moreValues; } - private void populateFromJoinedHeap(List results, int limit) - throws IOException { + /** + * @return true if more cells exist after this batch, false if scanner is done + */ + private boolean populateFromJoinedHeap(List results, ScannerContext scannerContext) + throws IOException { assert joinedContinuationRow != null; - Cell kv = populateResult(results, this.joinedHeap, limit, + boolean moreValues = + populateResult(results, this.joinedHeap, scannerContext, joinedContinuationRow.getRowArray(), joinedContinuationRow.getRowOffset(), joinedContinuationRow.getRowLength()); - if (kv != KV_LIMIT) { + + if (!scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { // We are done with this row, reset the continuation. joinedContinuationRow = null; } // As the data is obtained from two independent heaps, we need to // ensure that result list is sorted, because Result relies on that. Collections.sort(results, comparator); + return moreValues; } /** - * Fetches records with currentRow into results list, until next row or limit (if not -1). + * Fetches records with currentRow into results list, until next row, batchLimit (if not -1) is + * reached, or remainingResultSize (if not -1) is reaced * @param heap KeyValueHeap to fetch data from.It must be positioned on correct row before call. - * @param limit Max amount of KVs to place in result list, -1 means no limit. + * @param scannerContext * @param currentRow Byte array with key we are fetching. * @param offset offset for currentRow * @param length length for currentRow - * @return KV_LIMIT if limit reached, next KeyValue otherwise. + * @return state of last call to {@link KeyValueHeap#next()} */ - private Cell populateResult(List results, KeyValueHeap heap, int limit, - byte[] currentRow, int offset, short length) throws IOException { + private boolean populateResult(List results, KeyValueHeap heap, + ScannerContext scannerContext, byte[] currentRow, int offset, short length) + throws IOException { Cell nextKv; + boolean moreCellsInRow = false; + boolean tmpKeepProgress = scannerContext.getKeepProgress(); + // Scanning between column families and thus the scope is between cells + LimitScope limitScope = LimitScope.BETWEEN_CELLS; do { - heap.next(results, limit - results.size()); - if (limit > 0 && results.size() == limit) { - return KV_LIMIT; - } - nextKv = heap.peek(); - } while (nextKv != null && CellUtil.matchingRow(nextKv, currentRow, offset, length)); + // We want to maintain any progress that is made towards the limits while scanning across + // different column families. To do this, we toggle the keep progress flag on during calls + // to the StoreScanner to ensure that any progress made thus far is not wiped away. + scannerContext.setKeepProgress(true); + heap.next(results, scannerContext); + scannerContext.setKeepProgress(tmpKeepProgress); - return nextKv; + nextKv = heap.peek(); + moreCellsInRow = moreCellsInRow(nextKv, currentRow, offset, length); + + if (scannerContext.checkBatchLimit(limitScope)) { + return scannerContext.setScannerState(NextState.BATCH_LIMIT_REACHED).hasMoreValues(); + } else if (scannerContext.checkSizeLimit(limitScope)) { + ScannerContext.NextState state = + moreCellsInRow ? 
NextState.SIZE_LIMIT_REACHED_MID_ROW : NextState.SIZE_LIMIT_REACHED; + return scannerContext.setScannerState(state).hasMoreValues(); + } + } while (moreCellsInRow); + + return nextKv != null; + } + + /** + * Based on the nextKv in the heap, and the current row, decide whether or not there are more + * cells to be read in the heap. If the row of the nextKv in the heap matches the current row + * then there are more cells to be read in the row. + * @param nextKv + * @param currentRow + * @param offset + * @param length + * @return true When there are more cells in the row to be read + */ + private boolean moreCellsInRow(final Cell nextKv, byte[] currentRow, int offset, + short length) { + return nextKv != null && CellUtil.matchingRow(nextKv, currentRow, offset, length); } /* @@ -4527,18 +5456,37 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return this.filter != null && this.filter.filterAllRemaining(); } - private boolean nextInternal(List results, int limit) - throws IOException { + private boolean nextInternal(List results, ScannerContext scannerContext) + throws IOException { if (!results.isEmpty()) { throw new IllegalArgumentException("First parameter should be an empty list"); } + if (scannerContext == null) { + throw new IllegalArgumentException("Scanner context cannot be null"); + } RpcCallContext rpcCall = RpcServer.getCurrentCall(); + + // Save the initial progress from the Scanner context in these local variables. The progress + // may need to be reset a few times if rows are being filtered out so we save the initial + // progress. + int initialBatchProgress = scannerContext.getBatchProgress(); + long initialSizeProgress = scannerContext.getSizeProgress(); + // The loop here is used only when at some point during the next we determine // that due to effects of filters or otherwise, we have an empty row in the result. // Then we loop and try again. Otherwise, we must get out on the first iteration via return, // "true" if there's more data to read, "false" if there isn't (storeHeap is at a stop row, // and joinedHeap has no more data to read for the last row (if set, joinedContinuationRow). while (true) { + // Starting to scan a new row. Reset the scanner progress according to whether or not + // progress should be kept. + if (scannerContext.getKeepProgress()) { + // Progress should be kept. Reset to initial values seen at start of method invocation. + scannerContext.setProgress(initialBatchProgress, initialSizeProgress); + } else { + scannerContext.clearProgress(); + } + if (rpcCall != null) { // If a user specifies a too-restrictive or too-slow scanner, the // client might time out and disconnect while the server side @@ -4547,7 +5495,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // long afterTime = rpcCall.disconnectSince(); if (afterTime >= 0) { throw new CallerDisconnectedException( - "Aborting on region " + getRegionNameAsString() + ", call " + + "Aborting on region " + getRegionInfo().getRegionNameAsString() + ", call " + this + " after " + afterTime + " ms, since " + "caller disconnected"); } @@ -4564,38 +5512,60 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // offset = current.getRowOffset(); length = current.getRowLength(); } + boolean stopRow = isStopRow(currentRow, offset, length); + // When has filter row is true it means that the all the cells for a particular row must be + // read before a filtering decision can be made. 
This means that filters where hasFilterRow + // run the risk of encountering out of memory errors in the case that they are applied to a + // table that has very large rows. + boolean hasFilterRow = this.filter != null && this.filter.hasFilterRow(); + + // If filter#hasFilterRow is true, partial results are not allowed since allowing them + // would prevent the filters from being evaluated. Thus, if it is true, change the + // scope of any limits that could potentially create partial results to + // LimitScope.BETWEEN_ROWS so that those limits are not reached mid-row + if (hasFilterRow) { + if (LOG.isTraceEnabled()) { + LOG.trace("filter#hasFilterRow is true which prevents partial results from being " + + " formed. Changing scope of limits that may create partials"); + } + scannerContext.setSizeLimitScope(LimitScope.BETWEEN_ROWS); + } + // Check if we were getting data from the joinedHeap and hit the limit. // If not, then it's main path - getting results from storeHeap. if (joinedContinuationRow == null) { // First, check if we are at a stop row. If so, there are no more results. if (stopRow) { - if (filter != null && filter.hasFilterRow()) { + if (hasFilterRow) { filter.filterRowCells(results); } - return false; + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } // Check if rowkey filter wants to exclude this row. If so, loop to next. // Technically, if we hit limits before on this row, we don't need this call. if (filterRowKey(currentRow, offset, length)) { boolean moreRows = nextRow(currentRow, offset, length); - if (!moreRows) return false; + if (!moreRows) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } results.clear(); continue; } - Cell nextKv = populateResult(results, this.storeHeap, limit, currentRow, offset, - length); // Ok, we are good, let's try to get some results from the main heap. - if (nextKv == KV_LIMIT) { - if (this.filter != null && filter.hasFilterRow()) { - throw new IncompatibleFilterException( - "Filter whose hasFilterRow() returns true is incompatible with scan with limit!"); - } - return true; // We hit the limit. - } + populateResult(results, this.storeHeap, scannerContext, currentRow, offset, length); + if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { + if (hasFilterRow) { + throw new IncompatibleFilterException( + "Filter whose hasFilterRow() returns true is incompatible with scans that must " + + " stop mid-row because of a limit. ScannerContext:" + scannerContext); + } + return true; + } + Cell nextKv = this.storeHeap.peek(); stopRow = nextKv == null || isStopRow(nextKv.getRowArray(), nextKv.getRowOffset(), nextKv.getRowLength()); // save that the row was empty before filters applied to it. @@ -4604,19 +5574,33 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // We have the part of the row necessary for filtering (all of it, usually). // First filter with the filterRow(List). FilterWrapper.FilterRowRetCode ret = FilterWrapper.FilterRowRetCode.NOT_CALLED; - if (filter != null && filter.hasFilterRow()) { + if (hasFilterRow) { ret = filter.filterRowCellsWithRet(results); + + // We don't know how the results have changed after being filtered. Must set progress + // according to contents of results now. 
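[Illustrative note, not part of the patch] Once filterRowCellsWithRet() has rewritten the results list, the scanner's batch and size progress no longer match what will actually be returned, so the patch rebuilds both counters from the filtered cells. A self-contained sketch of that re-accumulation; the size function parameter stands in for CellUtil.estimatedHeapSizeOfWithoutTags used in the patch:

import java.util.List;
import java.util.function.ToLongFunction;

final class ScanProgress {
  long batchProgress;   // cells counted toward the batch limit
  long sizeProgress;    // estimated heap bytes counted toward the size limit

  <C> void rebuildFrom(List<C> filteredCells, ToLongFunction<C> cellHeapSize) {
    batchProgress = 0;
    sizeProgress = 0;
    for (C cell : filteredCells) {
      batchProgress++;
      sizeProgress += cellHeapSize.applyAsLong(cell);
    }
  }
}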
+ if (scannerContext.getKeepProgress()) { + scannerContext.setProgress(initialBatchProgress, initialSizeProgress); + } else { + scannerContext.clearProgress(); + } + scannerContext.incrementBatchProgress(results.size()); + for (Cell cell : results) { + scannerContext.incrementSizeProgress(CellUtil.estimatedHeapSizeOfWithoutTags(cell)); + } } if ((isEmptyRow || ret == FilterWrapper.FilterRowRetCode.EXCLUDE) || filterRow()) { results.clear(); boolean moreRows = nextRow(currentRow, offset, length); - if (!moreRows) return false; + if (!moreRows) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } // This row was totally filtered out, if this is NOT the last row, // we should continue on. Otherwise, nothing else to do. if (!stopRow) continue; - return false; + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } // Ok, we are done with storeHeap for this row. @@ -4634,18 +5618,24 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // currentRow, offset, length)); if (mayHaveData) { joinedContinuationRow = current; - populateFromJoinedHeap(results, limit); + populateFromJoinedHeap(results, scannerContext); + + if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { + return true; + } } } } else { // Populating from the joined heap was stopped by limits, populate some more. - populateFromJoinedHeap(results, limit); + populateFromJoinedHeap(results, scannerContext); + if (scannerContext.checkAnyLimitReached(LimitScope.BETWEEN_CELLS)) { + return true; + } } - // We may have just called populateFromJoinedMap and hit the limits. If that is // the case, we need to call it again on the next next() invocation. if (joinedContinuationRow != null) { - return true; + return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); } // Finally, we are done with both joinedHeap and storeHeap. @@ -4653,12 +5643,18 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // the case when SingleColumnValueExcludeFilter is used. if (results.isEmpty()) { boolean moreRows = nextRow(currentRow, offset, length); - if (!moreRows) return false; + if (!moreRows) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } if (!stopRow) continue; } // We are done. Return the result. - return !stopRow; + if (stopRow) { + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } else { + return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); + } } } @@ -5001,6 +5997,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return r.openHRegion(reporter); } + public static Region openHRegion(final Region other, final CancelableProgressable reporter) + throws IOException { + return openHRegion((HRegion)other, reporter); + } + /** * Open HRegion. * Calls initialize and sets sequenceId. @@ -5018,12 +6019,45 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // checkClassLoading(); this.openSeqNum = initialize(reporter); this.setSequenceId(openSeqNum); - if (wal != null && getRegionServerServices() != null) { + if (wal != null && getRegionServerServices() != null && !writestate.readOnly + && !isRecovering) { + // Only write the region open event marker to WAL if (1) we are not read-only + // (2) dist log replay is off or we are not recovering. 
In case region is + // recovering, the open event will be written at setRecovering(false) writeRegionOpenMarker(wal, openSeqNum); } return this; } + public static void warmupHRegion(final HRegionInfo info, + final HTableDescriptor htd, final WAL wal, final Configuration conf, + final RegionServerServices rsServices, + final CancelableProgressable reporter) + throws IOException { + + if (info == null) throw new NullPointerException("Passed region info is null"); + + if (LOG.isDebugEnabled()) { + LOG.debug("HRegion.Warming up region: " + info); + } + + Path rootDir = FSUtils.getRootDir(conf); + Path tableDir = FSUtils.getTableDir(rootDir, info.getTable()); + + FileSystem fs = null; + if (rsServices != null) { + fs = rsServices.getFileSystem(); + } + if (fs == null) { + fs = FileSystem.get(conf); + } + + HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, rsServices); + r.initializeWarmup(reporter); + r.close(); + } + + private void checkCompressionCodecs() throws IOException { for (HColumnDescriptor fam: this.htableDescriptor.getColumnFamilies()) { CompressionTest.testCompression(fam.getCompression()); @@ -5093,7 +6127,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // public static void addRegionToMETA(final HRegion meta, final HRegion r) throws IOException { meta.checkResources(); // The row key is the region name - byte[] row = r.getRegionName(); + byte[] row = r.getRegionInfo().getRegionName(); final long now = EnvironmentEdgeManager.currentTime(); final List cells = new ArrayList(2); cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, @@ -5160,18 +6194,20 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // Make sure that srcA comes first; important for key-ordering during // write of the merged file. - if (srcA.getStartKey() == null) { - if (srcB.getStartKey() == null) { + if (srcA.getRegionInfo().getStartKey() == null) { + if (srcB.getRegionInfo().getStartKey() == null) { throw new IOException("Cannot merge two regions with null start key"); } // A's start key is null but B's isn't. 
Assume A comes before B - } else if ((srcB.getStartKey() == null) || - (Bytes.compareTo(srcA.getStartKey(), srcB.getStartKey()) > 0)) { + } else if ((srcB.getRegionInfo().getStartKey() == null) || + (Bytes.compareTo(srcA.getRegionInfo().getStartKey(), + srcB.getRegionInfo().getStartKey()) > 0)) { a = srcB; b = srcA; } - if (!(Bytes.compareTo(a.getEndKey(), b.getStartKey()) == 0)) { + if (!(Bytes.compareTo(a.getRegionInfo().getEndKey(), + b.getRegionInfo().getStartKey()) == 0)) { throw new IOException("Cannot merge non-adjacent regions"); } return merge(a, b); @@ -5192,22 +6228,22 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // FileSystem fs = a.getRegionFileSystem().getFileSystem(); // Make sure each region's cache is empty - a.flushcache(true); - b.flushcache(true); + a.flush(true); + b.flush(true); // Compact each region so we only have one store file per family - a.compactStores(true); + a.compact(true); if (LOG.isDebugEnabled()) { LOG.debug("Files for region: " + a); a.getRegionFileSystem().logFileSystemState(LOG); } - b.compactStores(true); + b.compact(true); if (LOG.isDebugEnabled()) { LOG.debug("Files for region: " + b); b.getRegionFileSystem().logFileSystemState(LOG); } - RegionMergeTransaction rmt = new RegionMergeTransaction(a, b, true); + RegionMergeTransactionImpl rmt = new RegionMergeTransactionImpl(a, b, true); if (!rmt.prepare(null)) { throw new IOException("Unable to merge regions " + a + " and " + b); } @@ -5220,13 +6256,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // + Bytes.toStringBinary(mergedRegionInfo.getEndKey()) + ">"); HRegion dstRegion; try { - dstRegion = rmt.execute(null, null); + dstRegion = (HRegion)rmt.execute(null, null); } catch (IOException ioe) { rmt.rollback(null, null); throw new IOException("Failed merging region " + a + " and " + b + ", and successfully rolled back"); } - dstRegion.compactStores(true); + dstRegion.compact(true); if (LOG.isDebugEnabled()) { LOG.debug("Files for new region"); @@ -5247,14 +6283,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return dstRegion; } - // - // HBASE-880 - // - /** - * @param get get object - * @return result - * @throws IOException read exceptions - */ + @Override public Result get(final Get get) throws IOException { checkRow(get.getRow(), "Get"); // Verify families are all valid @@ -5272,13 +6301,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return Result.create(results, get.isCheckExistenceOnly() ? !results.isEmpty() : null, stale); } - /* - * Do a get based on the get parameter. - * @param withCoprocessor invoke coprocessor or not. We don't want to - * always invoke cp for this private method. - */ - public List get(Get get, boolean withCoprocessor) - throws IOException { + @Override + public List get(Get get, boolean withCoprocessor) throws IOException { List results = new ArrayList(); @@ -5363,27 +6387,19 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return stats.build(); } - /** - * Performs atomic multiple reads and writes on a given row. - * - * @param processor The object defines the reads and writes to a row. 
- * @param nonceGroup Optional nonce group of the operation (client Id) - * @param nonce Optional nonce of the operation (unique random id to ensure "more idempotence") - */ + @Override + public void processRowsWithLocks(RowProcessor processor) throws IOException { + processRowsWithLocks(processor, rowProcessorTimeout, HConstants.NO_NONCE, + HConstants.NO_NONCE); + } + + @Override public void processRowsWithLocks(RowProcessor processor, long nonceGroup, long nonce) throws IOException { processRowsWithLocks(processor, rowProcessorTimeout, nonceGroup, nonce); } - /** - * Performs atomic multiple reads and writes on a given row. - * - * @param processor The object defines the reads and writes to a row. - * @param timeout The timeout of the processor.process() execution - * Use a negative number to switch off the time bound - * @param nonceGroup Optional nonce group of the operation (client Id) - * @param nonce Optional nonce of the operation (unique random id to ensure "more idempotence") - */ + @Override public void processRowsWithLocks(RowProcessor processor, long timeout, long nonceGroup, long nonce) throws IOException { @@ -5596,14 +6612,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // TODO: There's a lot of boiler plate code identical to increment. // We should refactor append and increment as local get-mutate-put // transactions, so all stores only go through one code path for puts. - /** - * Perform one or more append operations on a row. - * - * @return new keyvalues after increment - * @throws IOException - */ - public Result append(Append append, long nonceGroup, long nonce) - throws IOException { + + @Override + public Result append(Append append, long nonceGroup, long nonce) throws IOException { byte[] row = append.getRow(); checkRow(row, "append"); boolean flush = false; @@ -5689,7 +6700,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } } if (cell.getTagsLength() > 0) { - Iterator i = CellUtil.tagsIterator(cell.getTagsArray(), + Iterator i = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength()); while (i.hasNext()) { newTags.add(i.next()); @@ -5861,11 +6872,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // TODO: There's a lot of boiler plate code identical to append. // We should refactor append and increment as local get-mutate-put // transactions, so all stores only go through one code path for puts. - /** - * Perform one or more increment operations on a row. 
- * @return new keyvalues after increment - * @throws IOException - */ + + @Override public Result increment(Increment increment, long nonceGroup, long nonce) throws IOException { byte [] row = increment.getRow(); @@ -5933,17 +6941,19 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // Iterate the input columns and update existing values if they were // found, otherwise add new column initialized to the increment amount int idx = 0; - for (Cell cell: family.getValue()) { + List edits = family.getValue(); + for (int i = 0; i < edits.size(); i++) { + Cell cell = edits.get(i); long amount = Bytes.toLong(CellUtil.cloneValue(cell)); boolean noWriteBack = (amount == 0); List newTags = new ArrayList(); // Carry forward any tags that might have been added by a coprocessor if (cell.getTagsLength() > 0) { - Iterator i = CellUtil.tagsIterator(cell.getTagsArray(), + Iterator itr = CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength()); - while (i.hasNext()) { - newTags.add(i.next()); + while (itr.hasNext()) { + newTags.add(itr.next()); } } @@ -5961,13 +6971,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } // Carry tags forward from previous version if (c.getTagsLength() > 0) { - Iterator i = CellUtil.tagsIterator(c.getTagsArray(), + Iterator itr = CellUtil.tagsIterator(c.getTagsArray(), c.getTagsOffset(), c.getTagsLength()); - while (i.hasNext()) { - newTags.add(i.next()); + while (itr.hasNext()) { + newTags.add(itr.next()); } } - idx++; + if (i < ( edits.size() - 1) && !CellUtil.matchingQualifier(cell, edits.get(i + 1))) + idx++; } // Append new incremented KeyValue to list @@ -6089,8 +7100,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // Request a cache flush. Do it outside update lock. requestFlush(); } - - return Result.create(allKVs); + return increment.isReturnResults() ? Result.create(allKVs) : null; } // @@ -6109,9 +7119,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // public static final long FIXED_OVERHEAD = ClassSize.align( ClassSize.OBJECT + ClassSize.ARRAY + - 44 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT + - (11 * Bytes.SIZEOF_LONG) + - 4 * Bytes.SIZEOF_BOOLEAN); + 45 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT + + (14 * Bytes.SIZEOF_LONG) + + 5 * Bytes.SIZEOF_BOOLEAN); // woefully out of date - currently missing: // 1 x HashMap - coprocessorServiceHandlers @@ -6160,22 +7170,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // System.exit(1); } - /** - * Registers a new protocol buffer {@link Service} subclass as a coprocessor endpoint to - * be available for handling - * {@link HRegion#execService(com.google.protobuf.RpcController, - * org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall)}} calls. - * - *
      - * Only a single instance may be registered per region for a given {@link Service} subclass (the - * instances are keyed on {@link com.google.protobuf.Descriptors.ServiceDescriptor#getFullName()}. - * After the first registration, subsequent calls with the same service name will fail with - * a return value of {@code false}. - *
      - * @param instance the {@code Service} subclass instance to expose as a coprocessor endpoint - * @return {@code true} if the registration was successful, {@code false} - * otherwise - */ + @Override public boolean registerService(Service instance) { /* * No stacking of instances is allowed for a single service name @@ -6190,26 +7185,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // coprocessorServiceHandlers.put(serviceDesc.getFullName(), instance); if (LOG.isDebugEnabled()) { - LOG.debug("Registered coprocessor service: region="+ - Bytes.toStringBinary(getRegionName())+" service="+serviceDesc.getFullName()); + LOG.debug("Registered coprocessor service: region=" + + Bytes.toStringBinary(getRegionInfo().getRegionName()) + + " service=" + serviceDesc.getFullName()); } return true; } - /** - * Executes a single protocol buffer coprocessor endpoint {@link Service} method using - * the registered protocol handlers. {@link Service} implementations must be registered via the - * {@link HRegion#registerService(com.google.protobuf.Service)} - * method before they are available. - * - * @param controller an {@code RpcController} implementation to pass to the invoked service - * @param call a {@code CoprocessorServiceCall} instance identifying the service, method, - * and parameters for the method invocation - * @return a protocol buffer {@code Message} instance containing the method's result - * @throws IOException if no registered service handler is found or an error - * occurs during the invocation - * @see org.apache.hadoop.hbase.regionserver.HRegion#registerService(com.google.protobuf.Service) - */ + @Override public Message execService(RpcController controller, CoprocessorServiceCall call) throws IOException { String serviceName = call.getServiceName(); @@ -6217,7 +7200,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // if (!coprocessorServiceHandlers.containsKey(serviceName)) { throw new UnknownProtocolException(null, "No registered coprocessor service found for name "+serviceName+ - " in region "+Bytes.toStringBinary(getRegionName())); + " in region "+Bytes.toStringBinary(getRegionInfo().getRegionName())); } Service service = coprocessorServiceHandlers.get(serviceName); @@ -6226,7 +7209,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // if (methodDesc == null) { throw new UnknownProtocolException(service.getClass(), "Unknown method "+methodName+" called on service "+serviceName+ - " in region "+Bytes.toStringBinary(getRegionName())); + " in region "+Bytes.toStringBinary(getRegionInfo().getRegionName())); } Message request = service.getRequestPrototype(methodDesc).newBuilderForType() @@ -6278,7 +7261,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // try { region.initialize(null); if (majorCompact) { - region.compactStores(true); + region.compact(true); } else { // Default behavior Scan scan = new Scan(); @@ -6391,22 +7374,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // this.coprocessorHost = coprocessorHost; } - /** - * This method needs to be called before any public call that reads or - * modifies data. It has to be called just before a try. - * #closeRegionOperation needs to be called in the try's finally block - * Acquires a read lock and checks if the region is closing or closed. 
- * @throws IOException - */ + @Override public void startRegionOperation() throws IOException { startRegionOperation(Operation.ANY); } - /** - * @param op The operation is about to be taken on the region - * @throws IOException - */ - protected void startRegionOperation(Operation op) throws IOException { + @Override + public void startRegionOperation(Operation op) throws IOException { switch (op) { case GET: // read operations case SCAN: @@ -6422,7 +7396,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // // when a region is in recovering state, no read, split or merge is allowed if (isRecovering() && (this.disallowWritesInRecovering || (op != Operation.PUT && op != Operation.DELETE && op != Operation.BATCH_MUTATE))) { - throw new RegionInRecoveryException(this.getRegionNameAsString() + + throw new RegionInRecoveryException(getRegionInfo().getRegionNameAsString() + " is recovering; cannot take reads"); } break; @@ -6436,12 +7410,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return; } if (this.closing.get()) { - throw new NotServingRegionException(getRegionNameAsString() + " is closing"); + throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); } lock(lock.readLock()); if (this.closed.get()) { lock.readLock().unlock(); - throw new NotServingRegionException(getRegionNameAsString() + " is closed"); + throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); } try { if (coprocessorHost != null) { @@ -6453,11 +7427,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } } - /** - * Closes the lock. This needs to be called in the finally block corresponding - * to the try block of #startRegionOperation - * @throws IOException - */ + @Override public void closeRegionOperation() throws IOException { closeRegionOperation(Operation.ANY); } @@ -6486,14 +7456,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // private void startBulkRegionOperation(boolean writeLockNeeded) throws NotServingRegionException, RegionTooBusyException, InterruptedIOException { if (this.closing.get()) { - throw new NotServingRegionException(getRegionNameAsString() + " is closing"); + throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closing"); } if (writeLockNeeded) lock(lock.writeLock()); else lock(lock.readLock()); if (this.closed.get()) { if (writeLockNeeded) lock.writeLock().unlock(); else lock.readLock().unlock(); - throw new NotServingRegionException(getRegionNameAsString() + " is closed"); + throw new NotServingRegionException(getRegionInfo().getRegionNameAsString() + " is closed"); } } @@ -6666,30 +7636,23 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } } - /** - * Gets the latest sequence number that was read from storage when this region was opened. - */ + @Override public long getOpenSeqNum() { return this.openSeqNum; } - /** - * Gets max sequence ids of stores that was read from storage when this region was opened. WAL - * Edits with smaller or equal sequence number will be skipped from replay. 
- */ - public Map getMaxStoreSeqIdForLogReplay() { + @Override + public Map getMaxStoreSeqId() { return this.maxSeqIdInStores; } - @VisibleForTesting + @Override public long getOldestSeqIdOfStore(byte[] familyName) { return wal.getEarliestMemstoreSeqNum(getRegionInfo() .getEncodedNameAsBytes(), familyName); } - /** - * @return if a given region is in compaction now. - */ + @Override public CompactionState getCompactionState() { boolean hasMajor = majorInProgress.get() > 0, hasMinor = minorInProgress.get() > 0; return (hasMajor ? (hasMinor ? CompactionState.MAJOR_AND_MINOR : CompactionState.MAJOR) @@ -6728,39 +7691,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // this.sequenceId.set(value); } - /** - * Listener class to enable callers of - * bulkLoadHFile() to perform any necessary - * pre/post processing of a given bulkload call - */ - public interface BulkLoadListener { - - /** - * Called before an HFile is actually loaded - * @param family family being loaded to - * @param srcPath path of HFile - * @return final path to be used for actual loading - * @throws IOException - */ - String prepareBulkLoad(byte[] family, String srcPath) throws IOException; - - /** - * Called after a successful HFile load - * @param family family being loaded to - * @param srcPath path of HFile - * @throws IOException - */ - void doneBulkLoad(byte[] family, String srcPath) throws IOException; - - /** - * Called after a failed HFile load - * @param family family being loaded to - * @param srcPath path of HFile - * @throws IOException - */ - void failedBulkLoad(byte[] family, String srcPath) throws IOException; - } - @VisibleForTesting class RowLockContext { private final HashedBytes row; private final CountDownLatch latch = new CountDownLatch(1); @@ -6778,7 +7708,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // RowLock newLock() { lockCount++; - return new RowLock(this); + RowLockImpl rl = new RowLockImpl(); + rl.setContext(this); + return rl; } @Override @@ -6806,29 +7738,26 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // } } - /** - * Row lock held by a given thread. - * One thread may acquire multiple locks on the same row simultaneously. - * The locks must be released by calling release() from the same thread. - */ - public static class RowLock { - @VisibleForTesting final RowLockContext context; + public static class RowLockImpl implements RowLock { + private RowLockContext context; private boolean released = false; - @VisibleForTesting RowLock(RowLockContext context) { + @VisibleForTesting + public RowLockContext getContext() { + return context; + } + + @VisibleForTesting + public void setContext(RowLockContext context) { this.context = context; } - /** - * Release the given lock. If there are no remaining locks held by the current thread - * then unlock the row and allow other threads to acquire the lock. 
- * @throws IllegalArgumentException if called by a different thread than the lock owning thread - */ + @Override public void release() { if (!released) { context.releaseLock(); - released = true; } + released = true; } } @@ -6852,16 +7781,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { // return key; } - /** - * Explicitly sync wal - * @throws IOException - */ - public void syncWal() throws IOException { - if(this.wal != null) { - this.wal.sync(); - } - } - /** * {@inheritDoc} */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index 0751634263a..014ec2c0dae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -119,6 +119,10 @@ public class HRegionFileSystem { return this.regionInfo; } + public HRegionInfo getRegionInfoForFS() { + return this.regionInfoForFs; + } + /** @return {@link Path} to the region's root directory. */ public Path getTableDir() { return this.tableDir; @@ -205,7 +209,7 @@ public class HRegionFileSystem { continue; } StoreFileInfo info = ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo, - regionInfoForFs, familyName, status); + regionInfoForFs, familyName, status.getPath()); storeFiles.add(info); } @@ -234,8 +238,8 @@ public class HRegionFileSystem { StoreFileInfo getStoreFileInfo(final String familyName, final String fileName) throws IOException { Path familyDir = getStoreDir(familyName); - FileStatus status = fs.getFileStatus(new Path(familyDir, fileName)); - return new StoreFileInfo(this.conf, this.fs, status); + return ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo, + regionInfoForFs, familyName, new Path(familyDir, fileName)); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 787828b3294..325e5106e37 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -80,6 +80,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.ConnectionUtils; +import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; import org.apache.hadoop.hbase.conf.ConfigurationManager; import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; import org.apache.hadoop.hbase.coordination.SplitLogWorkerCoordination; @@ -92,8 +93,10 @@ import org.apache.hadoop.hbase.executor.ExecutorType; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.http.InfoServer; import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcClientFactory; +import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.ipc.ServerRpcController; @@ -110,12 +113,14 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServic 
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse; import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; @@ -129,8 +134,10 @@ import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager; import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress; import org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler; import org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler; +import org.apache.hadoop.hbase.regionserver.handler.RegionReplicaFlushHandler; import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.trace.SpanReceiverHost; import org.apache.hadoop.hbase.util.Addressing; @@ -143,6 +150,7 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HasThread; import org.apache.hadoop.hbase.util.JSONBean; import org.apache.hadoop.hbase.util.JvmPauseMonitor; +import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; import org.apache.hadoop.hbase.util.Sleeper; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.VersionInfo; @@ -247,8 +255,7 @@ public class HRegionServer extends HasThread implements * Map of regions currently being served by this region server. Key is the * encoded region name. All access should be synchronized. */ - protected final Map onlineRegions = - new ConcurrentHashMap(); + protected final Map onlineRegions = new ConcurrentHashMap(); /** * Map of encoded region names to the DataNode locations they should be hosted on @@ -266,8 +273,8 @@ public class HRegionServer extends HasThread implements * Set of regions currently being in recovering state which means it can accept writes(edits from * previous failed region server) but not reads. A recovering region is also an online region. */ - protected final Map recoveringRegions = Collections - .synchronizedMap(new HashMap()); + protected final Map recoveringRegions = Collections + .synchronizedMap(new HashMap()); // Leases protected Leases leases; @@ -313,6 +320,9 @@ public class HRegionServer extends HasThread implements // RPC client. Used to make the stub above that does region server status checking. 
RpcClient rpcClient; + private RpcRetryingCallerFactory rpcRetryingCallerFactory; + private RpcControllerFactory rpcControllerFactory; + private UncaughtExceptionHandler uncaughtExceptionHandler; // Info server. Default access so can be used by unit tests. REGIONSERVER @@ -369,6 +379,7 @@ public class HRegionServer extends HasThread implements protected final Sleeper sleeper; private final int operationTimeout; + private final int shortOperationTimeout; private final RegionServerAccounting regionServerAccounting; @@ -392,6 +403,16 @@ public class HRegionServer extends HasThread implements */ protected ServerName serverName; + /* + * hostname specified by hostname config + */ + protected String useThisHostnameInstead; + + // key to the config parameter of server hostname + // the specification of server hostname is optional. The hostname should be resolvable from + // both master and region server + final static String HOSTNAME_KEY = "hbase.regionserver.hostname"; + /** * This servers startcode. */ @@ -476,6 +497,7 @@ public class HRegionServer extends HasThread implements throws IOException { this.fsOk = true; this.conf = conf; + HFile.checkHFileVersion(this.conf); checkCodecs(this.conf); this.userProvider = UserProvider.instantiate(conf); FSUtils.setupShortCircuitRead(this.conf); @@ -497,6 +519,10 @@ public class HRegionServer extends HasThread implements "hbase.regionserver.numregionstoreport", 10); this.operationTimeout = conf.getInt( + HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + + this.shortOperationTimeout = conf.getInt( HConstants.HBASE_RPC_SHORTOPERATION_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_SHORTOPERATION_TIMEOUT); @@ -505,9 +531,14 @@ public class HRegionServer extends HasThread implements rpcServices = createRpcServices(); this.startcode = System.currentTimeMillis(); - String hostName = rpcServices.isa.getHostName(); + useThisHostnameInstead = conf.get(HOSTNAME_KEY); + String hostName = shouldUseThisHostnameInstead() ? useThisHostnameInstead : + rpcServices.isa.getHostName(); serverName = ServerName.valueOf(hostName, rpcServices.isa.getPort(), startcode); + rpcControllerFactory = RpcControllerFactory.instantiate(this.conf); + rpcRetryingCallerFactory = RpcRetryingCallerFactory.instantiate(this.conf); + // login the zookeeper client principal (if using security) ZKUtil.loginClient(this.conf, "hbase.zookeeper.client.keytab.file", "hbase.zookeeper.client.kerberos.principal", hostName); @@ -570,6 +601,13 @@ public class HRegionServer extends HasThread implements this.fs, this.rootDir, !canUpdateTableDescriptor(), false); } + /* + * Returns true if configured hostname should be used + */ + protected boolean shouldUseThisHostnameInstead() { + return useThisHostnameInstead != null && !useThisHostnameInstead.isEmpty(); + } + protected void login(UserProvider user, String host) throws IOException { user.login("hbase.regionserver.keytab.file", "hbase.regionserver.kerberos.principal", host); @@ -792,7 +830,7 @@ public class HRegionServer extends HasThread implements this.leases = new Leases(this.threadWakeFrequency); // Create the thread to clean the moved regions list - movedRegionsCleaner = MovedRegionsCleaner.createAndStart(this); + movedRegionsCleaner = MovedRegionsCleaner.create(this); if (this.nonceManager != null) { // Create the scheduled chore that cleans up nonces. @@ -949,13 +987,14 @@ public class HRegionServer extends HasThread implements // Send interrupts to wake up threads if sleeping so they notice shutdown. 
// TODO: Should we check they are alive? If OOME could have exited already - if(this.hMemManager != null) this.hMemManager.stop(); + if (this.hMemManager != null) this.hMemManager.stop(); if (this.cacheFlusher != null) this.cacheFlusher.interruptIfNecessary(); if (this.compactSplitThread != null) this.compactSplitThread.interruptIfNecessary(); if (this.compactionChecker != null) this.compactionChecker.cancel(true); if (this.healthCheckChore != null) this.healthCheckChore.cancel(true); if (this.nonceManagerChore != null) this.nonceManagerChore.cancel(true); if (this.storefileRefresher != null) this.storefileRefresher.cancel(true); + sendShutdownInterrupt(); // Stop the quota manager if (rsQuotaManager != null) { @@ -1061,7 +1100,7 @@ public class HRegionServer extends HasThread implements private boolean areAllUserRegionsOffline() { if (getNumberOfOnlineRegions() > 2) return false; boolean allUserRegionsOffline = true; - for (Map.Entry e: this.onlineRegions.entrySet()) { + for (Map.Entry e: this.onlineRegions.entrySet()) { if (!e.getValue().getRegionInfo().isMetaTable()) { allUserRegionsOffline = false; break; @@ -1075,7 +1114,7 @@ public class HRegionServer extends HasThread implements */ private long getWriteRequestCount() { int writeCount = 0; - for (Map.Entry e: this.onlineRegions.entrySet()) { + for (Map.Entry e: this.onlineRegions.entrySet()) { writeCount += e.getValue().getWriteRequestsCount(); } return writeCount; @@ -1121,10 +1160,9 @@ public class HRegionServer extends HasThread implements // Instead they should be stored in an HBase table so that external visibility into HBase is // improved; Additionally the load balancer will be able to take advantage of a more complete // history. - MetricsRegionServerWrapper regionServerWrapper = this.metricsRegionServer.getRegionServerWrapper(); - Collection regions = getOnlineRegionsLocalContext(); - MemoryUsage memory = - ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); + MetricsRegionServerWrapper regionServerWrapper = metricsRegionServer.getRegionServerWrapper(); + Collection regions = getOnlineRegionsLocalContext(); + MemoryUsage memory = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage(); ClusterStatusProtos.ServerLoad.Builder serverLoad = ClusterStatusProtos.ServerLoad.newBuilder(); @@ -1139,7 +1177,7 @@ public class HRegionServer extends HasThread implements } RegionLoad.Builder regionLoadBldr = RegionLoad.newBuilder(); RegionSpecifier.Builder regionSpecifier = RegionSpecifier.newBuilder(); - for (HRegion region : regions) { + for (Region region : regions) { serverLoad.addRegionLoads(createRegionLoad(region, regionLoadBldr, regionSpecifier)); for (String coprocessor : getWAL(region.getRegionInfo()).getCoprocessorHost().getCoprocessors()) { @@ -1153,12 +1191,28 @@ public class HRegionServer extends HasThread implements } else { serverLoad.setInfoServerPort(-1); } + + // for the replicationLoad purpose. 
Only need to get from one service + // either source or sink will get the same info + ReplicationSourceService rsources = getReplicationSourceService(); + + if (rsources != null) { + // always refresh first to get the latest value + ReplicationLoad rLoad = rsources.refreshAndGetReplicationLoad(); + if (rLoad != null) { + serverLoad.setReplLoadSink(rLoad.getReplicationLoadSink()); + for (ClusterStatusProtos.ReplicationLoadSource rLS : rLoad.getReplicationLoadSourceList()) { + serverLoad.addReplLoadSource(rLS); + } + } + } + return serverLoad.build(); } String getOnlineRegionsAsPrintableString() { StringBuilder sb = new StringBuilder(); - for (HRegion r: this.onlineRegions.values()) { + for (Region r: this.onlineRegions.values()) { if (sb.length() > 0) sb.append(", "); sb.append(r.getRegionInfo().getEncodedName()); } @@ -1194,7 +1248,7 @@ public class HRegionServer extends HasThread implements // Ensure all user regions have been sent a close. Use this to // protect against the case where an open comes in after we start the // iterator of onlineRegions to close all user regions. - for (Map.Entry e : this.onlineRegions.entrySet()) { + for (Map.Entry e : this.onlineRegions.entrySet()) { HRegionInfo hri = e.getValue().getRegionInfo(); if (!this.regionsInTransitionInRS.containsKey(hri.getEncodedNameAsBytes()) && !closedRegions.contains(hri.getEncodedName())) { @@ -1264,9 +1318,18 @@ public class HRegionServer extends HasThread implements String hostnameFromMasterPOV = e.getValue(); this.serverName = ServerName.valueOf(hostnameFromMasterPOV, rpcServices.isa.getPort(), this.startcode); - if (!hostnameFromMasterPOV.equals(rpcServices.isa.getHostName())) { - LOG.info("Master passed us a different hostname to use; was=" + - rpcServices.isa.getHostName() + ", but now=" + hostnameFromMasterPOV); + if (shouldUseThisHostnameInstead() && + !hostnameFromMasterPOV.equals(useThisHostnameInstead)) { + String msg = "Master passed us a different hostname to use; was=" + + this.useThisHostnameInstead + ", but now=" + hostnameFromMasterPOV; + LOG.error(msg); + throw new IOException(msg); + } + if (!shouldUseThisHostnameInstead() && + !hostnameFromMasterPOV.equals(rpcServices.isa.getHostName())) { + String msg = "Master passed us a different hostname to use; was=" + + rpcServices.isa.getHostName() + ", but now=" + hostnameFromMasterPOV; + LOG.error(msg); } continue; } @@ -1350,44 +1413,37 @@ public class HRegionServer extends HasThread implements * * @throws IOException */ - private RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr, + private RegionLoad createRegionLoad(final Region r, RegionLoad.Builder regionLoadBldr, RegionSpecifier.Builder regionSpecifier) throws IOException { - byte[] name = r.getRegionName(); + byte[] name = r.getRegionInfo().getRegionName(); int stores = 0; int storefiles = 0; int storeUncompressedSizeMB = 0; int storefileSizeMB = 0; - int memstoreSizeMB = (int) (r.memstoreSize.get() / 1024 / 1024); + int memstoreSizeMB = (int) (r.getMemstoreSize() / 1024 / 1024); int storefileIndexSizeMB = 0; int rootIndexSizeKB = 0; int totalStaticIndexSizeKB = 0; int totalStaticBloomSizeKB = 0; long totalCompactingKVs = 0; long currentCompactedKVs = 0; - synchronized (r.stores) { - stores += r.stores.size(); - for (Store store : r.stores.values()) { - storefiles += store.getStorefilesCount(); - storeUncompressedSizeMB += (int) (store.getStoreSizeUncompressed() - / 1024 / 1024); - storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024); - storefileIndexSizeMB += 
(int) (store.getStorefilesIndexSize() / 1024 / 1024); - CompactionProgress progress = store.getCompactionProgress(); - if (progress != null) { - totalCompactingKVs += progress.totalCompactingKVs; - currentCompactedKVs += progress.currentCompactedKVs; - } - - rootIndexSizeKB += - (int) (store.getStorefilesIndexSize() / 1024); - - totalStaticIndexSizeKB += - (int) (store.getTotalStaticIndexSize() / 1024); - - totalStaticBloomSizeKB += - (int) (store.getTotalStaticBloomSize() / 1024); + List storeList = r.getStores(); + stores += storeList.size(); + for (Store store : storeList) { + storefiles += store.getStorefilesCount(); + storeUncompressedSizeMB += (int) (store.getStoreSizeUncompressed() / 1024 / 1024); + storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024); + storefileIndexSizeMB += (int) (store.getStorefilesIndexSize() / 1024 / 1024); + CompactionProgress progress = store.getCompactionProgress(); + if (progress != null) { + totalCompactingKVs += progress.totalCompactingKVs; + currentCompactedKVs += progress.currentCompactedKVs; } + rootIndexSizeKB += (int) (store.getStorefilesIndexSize() / 1024); + totalStaticIndexSizeKB += (int) (store.getTotalStaticIndexSize() / 1024); + totalStaticBloomSizeKB += (int) (store.getTotalStaticBloomSize() / 1024); } + float dataLocality = r.getHDFSBlocksDistribution().getBlockLocalityIndex(serverName.getHostname()); if (regionLoadBldr == null) { @@ -1408,13 +1464,14 @@ public class HRegionServer extends HasThread implements .setRootIndexSizeKB(rootIndexSizeKB) .setTotalStaticIndexSizeKB(totalStaticIndexSizeKB) .setTotalStaticBloomSizeKB(totalStaticBloomSizeKB) - .setReadRequestsCount(r.readRequestsCount.get()) - .setWriteRequestsCount(r.writeRequestsCount.get()) + .setReadRequestsCount(r.getReadRequestsCount()) + .setWriteRequestsCount(r.getWriteRequestsCount()) .setTotalCompactingKVs(totalCompactingKVs) .setCurrentCompactedKVs(currentCompactedKVs) - .setCompleteSequenceId(r.maxFlushedSeqId) .setDataLocality(dataLocality) .setLastMajorCompactionTs(r.getOldestHfileTs(true)); + ((HRegion)r).setCompleteSequenceId(regionLoadBldr); + return regionLoadBldr.build(); } @@ -1423,8 +1480,7 @@ public class HRegionServer extends HasThread implements * @return An instance of RegionLoad. */ public RegionLoad createRegionLoad(final String encodedRegionName) throws IOException { - HRegion r = null; - r = this.onlineRegions.get(encodedRegionName); + Region r = onlineRegions.get(encodedRegionName); return r != null ? 
createRegionLoad(r, null, null) : null; } @@ -1453,10 +1509,10 @@ public class HRegionServer extends HasThread implements @Override protected void chore() { - for (HRegion r : this.instance.onlineRegions.values()) { + for (Region r : this.instance.onlineRegions.values()) { if (r == null) continue; - for (Store s : r.getStores().values()) { + for (Store s : r.getStores()) { try { long multiplier = s.getCompactionCheckMultiplier(); assert multiplier > 0; @@ -1467,7 +1523,7 @@ public class HRegionServer extends HasThread implements + " requests compaction"); } else if (s.isMajorCompaction()) { if (majorCompactPriority == DEFAULT_PRIORITY - || majorCompactPriority > r.getCompactPriority()) { + || majorCompactPriority > ((HRegion)r).getCompactPriority()) { this.instance.compactSplitThread.requestCompaction(r, s, getName() + " requests major compaction; use default priority", null); } else { @@ -1496,15 +1552,15 @@ public class HRegionServer extends HasThread implements @Override protected void chore() { - for (HRegion r : this.server.onlineRegions.values()) { + for (Region r : this.server.onlineRegions.values()) { if (r == null) continue; - if (r.shouldFlush()) { + if (((HRegion)r).shouldFlush()) { FlushRequester requester = server.getFlushRequester(); if (requester != null) { long randomDelay = RandomUtils.nextInt(RANGE_OF_DELAY) + MIN_DELAY_TIME; - LOG.info(getName() + " requesting flush for region " + r.getRegionNameAsString() + - " after a delay of " + randomDelay); + LOG.info(getName() + " requesting flush for region " + + r.getRegionInfo().getRegionNameAsString() + " after a delay of " + randomDelay); //Throttle the flushes by putting a delay. If we don't throttle, and there //is a balanced write-load on the regions in a table, we might end up //overwhelming the filesystem with too many flushes at once. @@ -1628,6 +1684,12 @@ public class HRegionServer extends HasThread implements this.service.startExecutorService(ExecutorType.RS_LOG_REPLAY_OPS, conf.getInt( "hbase.regionserver.wal.max.splitters", SplitLogWorkerCoordination.DEFAULT_MAX_SPLITTERS)); + if (ServerRegionReplicaUtil.isRegionReplicaWaitForPrimaryFlushEnabled(conf)) { + this.service.startExecutorService(ExecutorType.RS_REGION_REPLICA_FLUSH_OPS, + conf.getInt("hbase.regionserver.region.replica.flusher.threads", + conf.getInt("hbase.regionserver.executor.openregion.threads", 3))); + } + Threads.setDaemonThreadRunning(this.walRoller.getThread(), getName() + ".logRoller", uncaughtExceptionHandler); this.cacheFlusher.start(uncaughtExceptionHandler); @@ -1637,6 +1699,7 @@ public class HRegionServer extends HasThread implements if (this.healthCheckChore != null) choreService.scheduleChore(healthCheckChore); if (this.nonceManagerChore != null) choreService.scheduleChore(nonceManagerChore); if (this.storefileRefresher != null) choreService.scheduleChore(storefileRefresher); + if (this.movedRegionsCleaner != null) choreService.scheduleChore(movedRegionsCleaner); // Leases is not a Thread. Internally it runs a daemon thread. If it gets // an unhandled exception, it will just exit. 
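// A hedged sketch of the nested-default lookup used above to size the region replica
// flusher pool: the flusher thread count falls back to the open-region executor size,
// which in turn falls back to 3. Only the two property names shown in the hunk above are
// taken from the patch; everything else here is illustrative.
import org.apache.hadoop.conf.Configuration;

public class ReplicaFlusherThreadsExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Nothing set: resolves to the innermost default, 3.
    int threads = conf.getInt("hbase.regionserver.region.replica.flusher.threads",
        conf.getInt("hbase.regionserver.executor.openregion.threads", 3));
    System.out.println("flusher threads = " + threads);

    // Setting only the open-region pool size changes the fallback value as well.
    conf.setInt("hbase.regionserver.executor.openregion.threads", 5);
    threads = conf.getInt("hbase.regionserver.region.replica.flusher.threads",
        conf.getInt("hbase.regionserver.executor.openregion.threads", 3));
    System.out.println("flusher threads = " + threads); // prints 5
  }
}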
@@ -1677,6 +1740,10 @@ public class HRegionServer extends HasThread implements private int putUpWebUI() throws IOException { int port = this.conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT); + if(this instanceof HMaster) { + port = conf.getInt(HConstants.MASTER_INFO_PORT, + HConstants.DEFAULT_MASTER_INFOPORT); + } // -1 is for disabling info server if (port < 0) return port; String addr = this.conf.get("hbase.regionserver.info.bindAddress", "0.0.0.0"); @@ -1803,12 +1870,12 @@ public class HRegionServer extends HasThread implements } @Override - public void postOpenDeployTasks(final HRegion r) - throws KeeperException, IOException { + public void postOpenDeployTasks(final Region r) throws KeeperException, IOException { + Preconditions.checkArgument(r instanceof HRegion, "r must be an HRegion"); rpcServices.checkOpen(); - LOG.info("Post open deploy tasks for " + r.getRegionNameAsString()); + LOG.info("Post open deploy tasks for " + r.getRegionInfo().getRegionNameAsString()); // Do checks to see if we need to compact (references or too many files) - for (Store s : r.getStores().values()) { + for (Store s : r.getStores()) { if (s.hasReferences() || s.needsCompaction()) { this.compactSplitThread.requestSystemCompaction(r, s, "Opening Region"); } @@ -1816,7 +1883,8 @@ public class HRegionServer extends HasThread implements long openSeqNum = r.getOpenSeqNum(); if (openSeqNum == HConstants.NO_SEQNUM) { // If we opened a region, we should have read some sequence number from it. - LOG.error("No sequence number found when opening " + r.getRegionNameAsString()); + LOG.error("No sequence number found when opening " + + r.getRegionInfo().getRegionNameAsString()); openSeqNum = 0; } @@ -1827,10 +1895,12 @@ public class HRegionServer extends HasThread implements if (!reportRegionStateTransition( TransitionCode.OPENED, openSeqNum, r.getRegionInfo())) { throw new IOException("Failed to report opened region to master: " - + r.getRegionNameAsString()); + + r.getRegionInfo().getRegionNameAsString()); } - LOG.debug("Finished post open deploy task for " + r.getRegionNameAsString()); + triggerFlushInPrimaryRegion((HRegion)r); + + LOG.debug("Finished post open deploy task for " + r.getRegionInfo().getRegionNameAsString()); } @Override @@ -1905,6 +1975,30 @@ public class HRegionServer extends HasThread implements return false; } + /** + * Trigger a flush in the primary region replica if this region is a secondary replica. Does not + * block this thread. See RegionReplicaFlushHandler for details. + */ + void triggerFlushInPrimaryRegion(final HRegion region) { + if (ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo())) { + return; + } + if (!ServerRegionReplicaUtil.isRegionReplicaReplicationEnabled(region.conf) || + !ServerRegionReplicaUtil.isRegionReplicaWaitForPrimaryFlushEnabled( + region.conf)) { + region.setReadsEnabled(true); + return; + } + + region.setReadsEnabled(false); // disable reads before marking the region as opened. + // RegionReplicaFlushHandler might reset this. 
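// Simplified illustration of the read gating described above: a secondary replica starts
// with reads disabled and a background handler re-enables them once a flush from the
// primary has been observed. This is a toy model, not the actual HRegion or
// RegionReplicaFlushHandler implementation; all names here are invented for the example.
import java.io.IOException;

class ReadGateModel {
  private volatile boolean readsEnabled;

  ReadGateModel(boolean isDefaultReplica) {
    // Primary (default) replicas serve reads immediately; secondaries wait for the flush.
    this.readsEnabled = isDefaultReplica;
  }

  void checkReadsEnabled() throws IOException {
    if (!readsEnabled) {
      throw new IOException("Region replica is not yet readable: waiting for primary flush");
    }
  }

  // Invoked once a flush from the primary has been seen and the data is safe to read.
  void onPrimaryFlushObserved() {
    readsEnabled = true;
  }
}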
+ + // submit it to be handled by one of the handlers so that we do not block OpenRegionHandler + this.service.submit( + new RegionReplicaFlushHandler(this, clusterConnection, + rpcRetryingCallerFactory, rpcControllerFactory, operationTimeout, region)); + } + @Override public RpcServerInterface getRpcServer() { return rpcServices.rpcServer; @@ -1989,6 +2083,12 @@ public class HRegionServer extends HasThread implements abort("Simulated kill"); } + /** + * Called on stop/abort before closing the cluster connection and meta locator. + */ + protected void sendShutdownInterrupt() { + } + /** * Wait on all threads to finish. Presumption is that all closes and stops * have already been called. @@ -2001,6 +2101,7 @@ public class HRegionServer extends HasThread implements if (this.periodicFlusher != null) periodicFlusher.cancel(true); if (this.healthCheckChore != null) healthCheckChore.cancel(true); if (this.storefileRefresher != null) storefileRefresher.cancel(true); + if (this.movedRegionsCleaner != null) movedRegionsCleaner.cancel(true); if (this.cacheFlusher != null) { this.cacheFlusher.join(); @@ -2051,13 +2152,14 @@ public class HRegionServer extends HasThread implements /** * Get the current master from ZooKeeper and open the RPC connection to it. - * + * To get a fresh connection, the current rssStub must be null. * Method will block until a master is available. You can break from this * block by requesting the server stop. * * @return master + port, or null if server has been stopped */ - private synchronized ServerName createRegionServerStatusStub() { + @VisibleForTesting + protected synchronized ServerName createRegionServerStatusStub() { if (rssStub != null) { return masterAddressTracker.getMasterAddress(); } @@ -2093,7 +2195,8 @@ public class HRegionServer extends HasThread implements } try { BlockingRpcChannel channel = - this.rpcClient.createBlockingRpcChannel(sn, userProvider.getCurrent(), operationTimeout); + this.rpcClient.createBlockingRpcChannel(sn, userProvider.getCurrent(), + shortOperationTimeout); intf = RegionServerStatusService.newBlockingStub(channel); break; } catch (IOException e) { @@ -2147,6 +2250,9 @@ public class HRegionServer extends HasThread implements long now = EnvironmentEdgeManager.currentTime(); int port = rpcServices.isa.getPort(); RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder(); + if (shouldUseThisHostnameInstead()) { + request.setUseThisHostnameInstead(useThisHostnameInstead); + } request.setPort(port); request.setServerStartCode(this.startcode); request.setServerCurrentTime(now); @@ -2161,36 +2267,37 @@ public class HRegionServer extends HasThread implements LOG.debug("Master is not running yet"); } else { LOG.warn("error telling master we are up", se); + rssStub = null; } } return result; } @Override - public long getLastSequenceId(byte[] encodedRegionName) { - long lastFlushedSequenceId = -1L; + public RegionStoreSequenceIds getLastSequenceId(byte[] encodedRegionName) { try { - GetLastFlushedSequenceIdRequest req = RequestConverter - .buildGetLastFlushedSequenceIdRequest(encodedRegionName); + GetLastFlushedSequenceIdRequest req = + RequestConverter.buildGetLastFlushedSequenceIdRequest(encodedRegionName); RegionServerStatusService.BlockingInterface rss = rssStub; if (rss == null) { // Try to connect one more time createRegionServerStatusStub(); rss = rssStub; if (rss == null) { // Still no luck, we tried - LOG.warn("Unable to connect to the master to check " - + "the last flushed sequence id"); - return -1L; + 
LOG.warn("Unable to connect to the master to check " + "the last flushed sequence id"); + return RegionStoreSequenceIds.newBuilder().setLastFlushedSequenceId(HConstants.NO_SEQNUM) + .build(); } } - lastFlushedSequenceId = rss.getLastFlushedSequenceId(null, req) - .getLastFlushedSequenceId(); + GetLastFlushedSequenceIdResponse resp = rss.getLastFlushedSequenceId(null, req); + return RegionStoreSequenceIds.newBuilder() + .setLastFlushedSequenceId(resp.getLastFlushedSequenceId()) + .addAllStoreSequenceId(resp.getStoreLastFlushedSequenceIdList()).build(); } catch (ServiceException e) { - lastFlushedSequenceId = -1l; - LOG.warn("Unable to connect to the master to check " - + "the last flushed sequence id", e); + LOG.warn("Unable to connect to the master to check the last flushed sequence id", e); + return RegionStoreSequenceIds.newBuilder().setLastFlushedSequenceId(HConstants.NO_SEQNUM) + .build(); } - return lastFlushedSequenceId; } /** @@ -2208,10 +2315,10 @@ public class HRegionServer extends HasThread implements * @param abort Whether we're running an abort. */ void closeMetaTableRegions(final boolean abort) { - HRegion meta = null; + Region meta = null; this.lock.writeLock().lock(); try { - for (Map.Entry e: onlineRegions.entrySet()) { + for (Map.Entry e: onlineRegions.entrySet()) { HRegionInfo hri = e.getValue().getRegionInfo(); if (hri.isMetaRegion()) { meta = e.getValue(); @@ -2233,8 +2340,8 @@ public class HRegionServer extends HasThread implements void closeUserRegions(final boolean abort) { this.lock.writeLock().lock(); try { - for (Map.Entry e: this.onlineRegions.entrySet()) { - HRegion r = e.getValue(); + for (Map.Entry e: this.onlineRegions.entrySet()) { + Region r = e.getValue(); if (!r.getRegionInfo().isMetaTable() && r.isAvailable()) { // Don't update zk with this close transition; pass false. closeRegionIgnoreErrors(r.getRegionInfo(), abort); @@ -2264,7 +2371,7 @@ public class HRegionServer extends HasThread implements } @Override - public Map getRecoveringRegions() { + public Map getRecoveringRegions() { return this.recoveringRegions; } @@ -2295,13 +2402,13 @@ public class HRegionServer extends HasThread implements * This method will only work if HRegionServer is in the same JVM as client; * HRegion cannot be serialized to cross an rpc. */ - public Collection getOnlineRegionsLocalContext() { - Collection regions = this.onlineRegions.values(); + public Collection getOnlineRegionsLocalContext() { + Collection regions = this.onlineRegions.values(); return Collections.unmodifiableCollection(regions); } @Override - public void addToOnlineRegions(HRegion region) { + public void addToOnlineRegions(Region region) { this.onlineRegions.put(region.getRegionInfo().getEncodedName(), region); configurationManager.registerObserver(region); } @@ -2311,9 +2418,9 @@ public class HRegionServer extends HasThread implements * biggest. If two regions are the same size, then the last one found wins; i.e. this method * may NOT return all regions. */ - SortedMap getCopyOfOnlineRegionsSortedBySize() { + SortedMap getCopyOfOnlineRegionsSortedBySize() { // we'll sort the regions in reverse - SortedMap sortedRegions = new TreeMap( + SortedMap sortedRegions = new TreeMap( new Comparator() { @Override public int compare(Long a, Long b) { @@ -2321,8 +2428,8 @@ public class HRegionServer extends HasThread implements } }); // Copy over all regions. Regions are sorted by size with biggest first. 
- for (HRegion region : this.onlineRegions.values()) { - sortedRegions.put(region.memstoreSize.get(), region); + for (Region region : this.onlineRegions.values()) { + sortedRegions.put(region.getMemstoreSize(), region); } return sortedRegions; } @@ -2348,7 +2455,7 @@ public class HRegionServer extends HasThread implements */ protected HRegionInfo[] getMostLoadedRegions() { ArrayList regions = new ArrayList(); - for (HRegion r : onlineRegions.values()) { + for (Region r : onlineRegions.values()) { if (!r.isAvailable()) { continue; } @@ -2544,10 +2651,10 @@ public class HRegionServer extends HasThread implements * @return Online regions from tableName */ @Override - public List getOnlineRegions(TableName tableName) { - List tableRegions = new ArrayList(); + public List getOnlineRegions(TableName tableName) { + List tableRegions = new ArrayList(); synchronized (this.onlineRegions) { - for (HRegion region: this.onlineRegions.values()) { + for (Region region: this.onlineRegions.values()) { HRegionInfo regionInfo = region.getRegionInfo(); if(regionInfo.getTable().equals(tableName)) { tableRegions.add(region); @@ -2566,7 +2673,7 @@ public class HRegionServer extends HasThread implements public Set getOnlineTables() { Set tables = new HashSet(); synchronized (this.onlineRegions) { - for (HRegion region: this.onlineRegions.values()) { + for (Region region: this.onlineRegions.values()) { tables.add(region.getTableDesc().getTableName()); } } @@ -2583,8 +2690,8 @@ public class HRegionServer extends HasThread implements "skipping."); LOG.debug("Exception details for failure to fetch wal coprocessor information.", exception); } - Collection regions = getOnlineRegionsLocalContext(); - for (HRegion region: regions) { + Collection regions = getOnlineRegionsLocalContext(); + for (Region region: regions) { coprocessors.addAll(region.getCoprocessorHost().getCoprocessors()); try { coprocessors.addAll(getWAL(region.getRegionInfo()).getCoprocessorHost().getCoprocessors()); @@ -2634,7 +2741,7 @@ public class HRegionServer extends HasThread implements protected boolean closeRegion(String encodedName, final boolean abort, final ServerName sn) throws NotServingRegionException { //Check for permissions to close. - HRegion actualRegion = this.getFromOnlineRegions(encodedName); + Region actualRegion = this.getFromOnlineRegions(encodedName); if ((actualRegion != null) && (actualRegion.getCoprocessorHost() != null)) { try { actualRegion.getCoprocessorHost().preClose(false); @@ -2695,7 +2802,7 @@ public class HRegionServer extends HasThread implements * @return HRegion for the passed binary regionName or null if * named region is not member of the online regions. 
*/ - public HRegion getOnlineRegion(final byte[] regionName) { + public Region getOnlineRegion(final byte[] regionName) { String encodedRegionName = HRegionInfo.encodeRegionName(regionName); return this.onlineRegions.get(encodedRegionName); } @@ -2705,14 +2812,14 @@ public class HRegionServer extends HasThread implements } @Override - public HRegion getFromOnlineRegions(final String encodedRegionName) { + public Region getFromOnlineRegions(final String encodedRegionName) { return this.onlineRegions.get(encodedRegionName); } @Override - public boolean removeFromOnlineRegions(final HRegion r, ServerName destination) { - HRegion toReturn = this.onlineRegions.remove(r.getRegionInfo().getEncodedName()); + public boolean removeFromOnlineRegions(final Region r, ServerName destination) { + Region toReturn = this.onlineRegions.remove(r.getRegionInfo().getEncodedName()); if (destination != null) { try { @@ -2744,20 +2851,20 @@ public class HRegionServer extends HasThread implements * @return {@link HRegion} for regionName * @throws NotServingRegionException */ - protected HRegion getRegion(final byte[] regionName) + protected Region getRegion(final byte[] regionName) throws NotServingRegionException { String encodedRegionName = HRegionInfo.encodeRegionName(regionName); return getRegionByEncodedName(regionName, encodedRegionName); } - public HRegion getRegionByEncodedName(String encodedRegionName) + public Region getRegionByEncodedName(String encodedRegionName) throws NotServingRegionException { return getRegionByEncodedName(null, encodedRegionName); } - protected HRegion getRegionByEncodedName(byte[] regionName, String encodedRegionName) + protected Region getRegionByEncodedName(byte[] regionName, String encodedRegionName) throws NotServingRegionException { - HRegion region = this.onlineRegions.get(encodedRegionName); + Region region = this.onlineRegions.get(encodedRegionName); if (region == null) { MovedRegionInfo moveInfo = getMovedRegion(encodedRegionName); if (moveInfo != null) { @@ -2940,21 +3047,31 @@ public class HRegionServer extends HasThread implements } } + /* + * Use this to allow tests to override and schedule more frequently. + */ + + protected int movedRegionCleanerPeriod() { + return TIMEOUT_REGION_MOVED; + } + /** * Creates a Chore thread to clean the moved region cache. 
*/ - protected static class MovedRegionsCleaner extends ScheduledChore implements Stoppable { + + protected final static class MovedRegionsCleaner extends ScheduledChore implements Stoppable { private HRegionServer regionServer; Stoppable stoppable; private MovedRegionsCleaner( HRegionServer regionServer, Stoppable stoppable){ - super("MovedRegionsCleaner for region " + regionServer, stoppable, TIMEOUT_REGION_MOVED); + super("MovedRegionsCleaner for region " + regionServer, stoppable, + regionServer.movedRegionCleanerPeriod()); this.regionServer = regionServer; this.stoppable = stoppable; } - static MovedRegionsCleaner createAndStart(HRegionServer rs){ + static MovedRegionsCleaner create(HRegionServer rs){ Stoppable stoppable = new Stoppable() { private volatile boolean isStopped = false; @Override public void stop(String why) { isStopped = true;} @@ -3003,17 +3120,17 @@ public class HRegionServer extends HasThread implements * @throws KeeperException * @throws IOException */ - private void updateRecoveringRegionLastFlushedSequenceId(HRegion r) throws KeeperException, + private void updateRecoveringRegionLastFlushedSequenceId(Region r) throws KeeperException, IOException { if (!r.isRecovering()) { // return immdiately for non-recovering regions return; } - HRegionInfo region = r.getRegionInfo(); + HRegionInfo regionInfo = r.getRegionInfo(); ZooKeeperWatcher zkw = getZooKeeper(); - String previousRSName = this.getLastFailedRSFromZK(region.getEncodedName()); - Map maxSeqIdInStores = r.getMaxStoreSeqIdForLogReplay(); + String previousRSName = this.getLastFailedRSFromZK(regionInfo.getEncodedName()); + Map maxSeqIdInStores = r.getMaxStoreSeqId(); long minSeqIdForLogReplay = -1; for (Long storeSeqIdForReplay : maxSeqIdInStores.values()) { if (minSeqIdForLogReplay == -1 || storeSeqIdForReplay < minSeqIdForLogReplay) { @@ -3024,7 +3141,7 @@ public class HRegionServer extends HasThread implements try { long lastRecordedFlushedSequenceId = -1; String nodePath = ZKUtil.joinZNode(this.zooKeeper.recoveringRegionsZNode, - region.getEncodedName()); + regionInfo.getEncodedName()); // recovering-region level byte[] data; try { @@ -3033,7 +3150,7 @@ public class HRegionServer extends HasThread implements throw new InterruptedIOException(); } if (data != null) { - lastRecordedFlushedSequenceId = ZKSplitLog.parseLastFlushedSequenceIdFrom(data); + lastRecordedFlushedSequenceId = ZKSplitLog.parseLastFlushedSequenceIdFrom(data); } if (data == null || lastRecordedFlushedSequenceId < minSeqIdForLogReplay) { ZKUtil.setData(zkw, nodePath, ZKUtil.positionToByteArray(minSeqIdForLogReplay)); @@ -3043,14 +3160,14 @@ public class HRegionServer extends HasThread implements nodePath = ZKUtil.joinZNode(nodePath, previousRSName); ZKUtil.setData(zkw, nodePath, ZKUtil.regionSequenceIdsToByteArray(minSeqIdForLogReplay, maxSeqIdInStores)); - LOG.debug("Update last flushed sequence id of region " + region.getEncodedName() + " for " - + previousRSName); + LOG.debug("Update last flushed sequence id of region " + regionInfo.getEncodedName() + + " for " + previousRSName); } else { LOG.warn("Can't find failed region server for recovering region " + - region.getEncodedName()); + regionInfo.getEncodedName()); } } catch (NoNodeException ignore) { - LOG.debug("Region " + region.getEncodedName() + + LOG.debug("Region " + regionInfo.getEncodedName() + " must have completed recovery because its recovery znode has been removed", ignore); } } @@ -3142,6 +3259,13 @@ public class HRegionServer extends HasThread implements return 
configurationManager; } + /** + * @return Return table descriptors implementation. + */ + public TableDescriptors getTableDescriptors() { + return this.tableDescriptors; + } + /** * Reload the configuration from disk. */ @@ -3160,8 +3284,8 @@ public class HRegionServer extends HasThread implements @Override public double getCompactionPressure() { double max = 0; - for (HRegion region : onlineRegions.values()) { - for (Store store : region.getStores().values()) { + for (Region region : onlineRegions.values()) { + for (Store store : region.getStores()) { double normCount = store.getCompactionPressure(); if (normCount > max) { max = normCount; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index f5bb67a8ba5..e0bb7cf180f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -319,7 +319,7 @@ public class HStore implements Store { // Use the algorithm the key wants cipher = Encryption.getCipher(conf, key.getAlgorithm()); if (cipher == null) { - throw new RuntimeException("Cipher '" + cipher + "' is not available"); + throw new RuntimeException("Cipher '" + key.getAlgorithm() + "' is not available"); } // Fail if misconfigured // We use the encryption type specified in the column schema as a sanity check on @@ -333,7 +333,7 @@ public class HStore implements Store { // Family does not provide key material, create a random key cipher = Encryption.getCipher(conf, cipherName); if (cipher == null) { - throw new RuntimeException("Cipher '" + cipher + "' is not available"); + throw new RuntimeException("Cipher '" + cipherName + "' is not available"); } key = cipher.getRandomKey(); } @@ -420,6 +420,11 @@ public class HStore implements Store { return this.memstore.getFlushableSize(); } + @Override + public long getSnapshotSize() { + return this.memstore.getSnapshotSize(); + } + @Override public long getCompactionCheckMultiplier() { return this.compactionCheckMultiplier; @@ -470,7 +475,8 @@ public class HStore implements Store { /** * @return The maximum sequence id in all store files. Used for log replay. */ - long getMaxSequenceId() { + @Override + public long getMaxSequenceId() { return StoreFile.getMaxSequenceIdInList(this.getStorefiles()); } @@ -598,11 +604,31 @@ public class HStore implements Store { */ @Override public void refreshStoreFiles() throws IOException { + Collection newFiles = fs.getStoreFiles(getColumnFamilyName()); + refreshStoreFilesInternal(newFiles); + } + + @Override + public void refreshStoreFiles(Collection newFiles) throws IOException { + List storeFiles = new ArrayList(newFiles.size()); + for (String file : newFiles) { + storeFiles.add(fs.getStoreFileInfo(getColumnFamilyName(), file)); + } + refreshStoreFilesInternal(storeFiles); + } + + /** + * Checks the underlying store files, and opens the files that have not + * been opened, and removes the store file readers for store files no longer + * available. Mainly used by secondary region replicas to keep up to date with + * the primary region files. 
+ * @throws IOException + */ + private void refreshStoreFilesInternal(Collection newFiles) throws IOException { StoreFileManager sfm = storeEngine.getStoreFileManager(); Collection currentFiles = sfm.getStorefiles(); if (currentFiles == null) currentFiles = new ArrayList(0); - Collection newFiles = fs.getStoreFiles(getColumnFamilyName()); if (newFiles == null) newFiles = new ArrayList(0); HashMap currentFilesSet = new HashMap(currentFiles.size()); @@ -777,19 +803,33 @@ public class HStore implements Store { } @Override - public void bulkLoadHFile(String srcPathStr, long seqNum) throws IOException { + public Path bulkLoadHFile(String srcPathStr, long seqNum) throws IOException { Path srcPath = new Path(srcPathStr); Path dstPath = fs.bulkLoadStoreFile(getColumnFamilyName(), srcPath, seqNum); - StoreFile sf = createStoreFileAndReader(dstPath); + LOG.info("Loaded HFile " + srcPath + " into store '" + getColumnFamilyName() + "' as " + + dstPath + " - updating store file list."); + StoreFile sf = createStoreFileAndReader(dstPath); + bulkLoadHFile(sf); + + LOG.info("Successfully loaded store file " + srcPath + " into store " + this + + " (new location: " + dstPath + ")"); + + return dstPath; + } + + @Override + public void bulkLoadHFile(StoreFileInfo fileInfo) throws IOException { + StoreFile sf = createStoreFileAndReader(fileInfo); + bulkLoadHFile(sf); + } + + private void bulkLoadHFile(StoreFile sf) throws IOException { StoreFile.Reader r = sf.getReader(); this.storeSize += r.length(); this.totalUncompressedBytes += r.getTotalUncompressedBytes(); - LOG.info("Loaded HFile " + srcPath + " into store '" + getColumnFamilyName() + - "' as " + dstPath + " - updating store file list."); - // Append the new storefile into the list this.lock.writeLock().lock(); try { @@ -803,8 +843,7 @@ public class HStore implements Store { this.lock.writeLock().unlock(); } notifyChangedReadersObservers(); - LOG.info("Successfully loaded store file " + srcPath - + " into store " + this + " (new location: " + dstPath + ")"); + LOG.info("Loaded HFile " + sf.getFileInfo() + " into store '" + getColumnFamilyName()); if (LOG.isTraceEnabled()) { String traceMessage = "BULK LOAD time,size,store size,store files [" + EnvironmentEdgeManager.currentTime() + "," + r.length() + "," + storeSize @@ -1033,7 +1072,9 @@ public class HStore implements Store { this.lock.writeLock().lock(); try { this.storeEngine.getStoreFileManager().insertNewFiles(sfs); - this.memstore.clearSnapshot(snapshotId); + if (snapshotId > 0) { + this.memstore.clearSnapshot(snapshotId); + } } finally { // We need the lock, as long as we are updating the storeFiles // or changing the memstore. Let us release it before calling @@ -1333,10 +1374,12 @@ public class HStore implements Store { * @param compaction */ @Override - public void completeCompactionMarker(CompactionDescriptor compaction) + public void replayCompactionMarker(CompactionDescriptor compaction, + boolean pickCompactionFiles, boolean removeFiles) throws IOException { LOG.debug("Completing compaction from the WAL marker"); List compactionInputs = compaction.getCompactionInputList(); + List compactionOutputs = Lists.newArrayList(compaction.getCompactionOutputList()); // The Compaction Marker is written after the compaction is completed, // and the files moved into the region/family folder. @@ -1353,22 +1396,40 @@ public class HStore implements Store { // being in the store's folder) or they may be missing due to a compaction. 
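    // [Editorial summary, not part of the patch] The marker replay below proceeds in three steps:
    // 1) translate the input file names from the marker into the StoreFiles this store currently
    //    has open (some inputs may already be gone), 2) when pickCompactionFiles is set (the
    //    secondary-replica case) open any marker output files the store does not yet know about,
    //    and 3) swap inputs for outputs via replaceStoreFiles() and let completeCompaction() drop
    //    the replaced inputs when removeFiles is set.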
String familyName = this.getColumnFamilyName(); - List inputPaths = new ArrayList(compactionInputs.size()); + List inputFiles = new ArrayList(compactionInputs.size()); for (String compactionInput : compactionInputs) { Path inputPath = fs.getStoreFilePath(familyName, compactionInput); - inputPaths.add(inputPath); + inputFiles.add(inputPath.getName()); } //some of the input files might already be deleted List inputStoreFiles = new ArrayList(compactionInputs.size()); for (StoreFile sf : this.getStorefiles()) { - if (inputPaths.contains(sf.getQualifiedPath())) { + if (inputFiles.contains(sf.getPath().getName())) { inputStoreFiles.add(sf); } } - this.replaceStoreFiles(inputStoreFiles, Collections.emptyList()); - this.completeCompaction(inputStoreFiles); + // check whether we need to pick up the new files + List outputStoreFiles = new ArrayList(compactionOutputs.size()); + + if (pickCompactionFiles) { + for (StoreFile sf : this.getStorefiles()) { + compactionOutputs.remove(sf.getPath().getName()); + } + for (String compactionOutput : compactionOutputs) { + StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), compactionOutput); + StoreFile storeFile = createStoreFileAndReader(storeFileInfo); + outputStoreFiles.add(storeFile); + } + } + + if (!inputStoreFiles.isEmpty() || !outputStoreFiles.isEmpty()) { + LOG.info("Replaying compaction marker, replacing input files: " + + inputStoreFiles + " with output files : " + outputStoreFiles); + this.replaceStoreFiles(inputStoreFiles, outputStoreFiles); + this.completeCompaction(inputStoreFiles, removeFiles); + } } /** @@ -1831,8 +1892,7 @@ public class HStore implements Store { // Unlikely that there'll be an instance of actual first row in table. if (walkForwardInSingleRow(scanner, firstOnRow, state)) return true; // If here, need to start backing up. - while (scanner.seekBefore(firstOnRow.getBuffer(), firstOnRow.getKeyOffset(), - firstOnRow.getKeyLength())) { + while (scanner.seekBefore(firstOnRow)) { Cell kv = scanner.getKeyValue(); if (!state.isTargetTable(kv)) break; if (!state.isBetterCandidate(kv)) break; @@ -2203,6 +2263,52 @@ public class HStore implements Store { public List getCommittedFiles() { return committedFiles; } + + /** + * Similar to commit, but called in secondary region replicas for replaying the + * flush cache from primary region. Adds the new files to the store, and drops the + * snapshot depending on dropMemstoreSnapshot argument. 
+ * @param fileNames names of the flushed files + * @param dropMemstoreSnapshot whether to drop the prepared memstore snapshot + * @throws IOException + */ + @Override + public void replayFlush(List fileNames, boolean dropMemstoreSnapshot) + throws IOException { + List storeFiles = new ArrayList(fileNames.size()); + for (String file : fileNames) { + // open the file as a store file (hfile link, etc) + StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), file); + StoreFile storeFile = createStoreFileAndReader(storeFileInfo); + storeFiles.add(storeFile); + HStore.this.storeSize += storeFile.getReader().length(); + HStore.this.totalUncompressedBytes += storeFile.getReader().getTotalUncompressedBytes(); + if (LOG.isInfoEnabled()) { + LOG.info("Region: " + HStore.this.getRegionInfo().getEncodedName() + + " added " + storeFile + ", entries=" + storeFile.getReader().getEntries() + + ", sequenceid=" + + storeFile.getReader().getSequenceID() + + ", filesize=" + StringUtils.humanReadableInt(storeFile.getReader().length())); + } + } + + long snapshotId = -1; // -1 means do not drop + if (dropMemstoreSnapshot && snapshot != null) { + snapshotId = snapshot.getId(); + } + HStore.this.updateStorefiles(storeFiles, snapshotId); + } + + /** + * Abort the snapshot preparation. Drops the snapshot if any. + * @throws IOException + */ + @Override + public void abort() throws IOException { + if (snapshot == null) { + return; + } + HStore.this.updateStorefiles(new ArrayList(0), snapshot.getId()); + } } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java index 43deb584f1e..a66a29ce530 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java @@ -324,7 +324,7 @@ public class HeapMemoryManager { } @Override - public void flushRequested(FlushType type, HRegion region) { + public void flushRequested(FlushType type, Region region) { switch (type) { case ABOVE_HIGHER_MARK: blockedFlushCount.incrementAndGet(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java index 18bb3760933..d7a9be56771 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java @@ -72,7 +72,7 @@ extends ConstantSizeRegionSplitPolicy { // Get size to check long sizeToCheck = getSizeToCheck(tableRegionsCount); - for (Store store : region.getStores().values()) { + for (Store store : region.getStores()) { // If any of the stores is unable to split (eg they contain reference files) // then don't split if ((!store.canSplit())) { @@ -114,7 +114,7 @@ extends ConstantSizeRegionSplitPolicy { TableName tablename = this.region.getTableDesc().getTableName(); int tableRegionsCount = 0; try { - List hri = rss.getOnlineRegions(tablename); + List hri = rss.getOnlineRegions(tablename); tableRegionsCount = hri == null || hri.isEmpty()? 
0: hri.size(); } catch (IOException e) { LOG.debug("Failed getOnlineRegions " + tablename, e); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java index 41708c0b8ed..f73e363cee1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/InternalScanner.java @@ -22,8 +22,8 @@ import java.io.Closeable; import java.io.IOException; import java.util.List; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.classification.InterfaceAudience; /** * Internal scanners differ from client-side scanners in that they operate on @@ -50,14 +50,13 @@ public interface InternalScanner extends Closeable { boolean next(List results) throws IOException; /** - * Grab the next row's worth of values with a limit on the number of values - * to return. + * Grab the next row's worth of values. * @param result return output array - * @param limit limit on row count to get + * @param scannerContext * @return true if more rows exist after this one, false if scanner is done * @throws IOException e */ - boolean next(List result, int limit) throws IOException; + boolean next(List result, ScannerContext scannerContext) throws IOException; /** * Closes the scanner and releases any resources it has allocated diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java index 23834d3796c..761267f65ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueHeap.java @@ -24,9 +24,10 @@ import java.util.Comparator; import java.util.List; import java.util.PriorityQueue; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; /** * Implements a heap merge across any number of KeyValueScanners. @@ -125,18 +126,23 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner *

      * This method takes care of updating the heap. *

      - * This can ONLY be called when you are using Scanners that implement - * InternalScanner as well as KeyValueScanner (a {@link StoreScanner}). + * This can ONLY be called when you are using Scanners that implement InternalScanner as well as + * KeyValueScanner (a {@link StoreScanner}). * @param result - * @param limit - * @return true if there are more keys, false if all scanners are done + * @return true if more rows exist after this one, false if scanner is done */ - public boolean next(List result, int limit) throws IOException { + @Override + public boolean next(List result) throws IOException { + return next(result, NoLimitScannerContext.getInstance()); + } + + @Override + public boolean next(List result, ScannerContext scannerContext) throws IOException { if (this.current == null) { - return false; + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } InternalScanner currentAsInternal = (InternalScanner)this.current; - boolean mayContainMoreRows = currentAsInternal.next(result, limit); + boolean moreCells = currentAsInternal.next(result, scannerContext); Cell pee = this.current.peek(); /* * By definition, any InternalScanner must return false only when it has no @@ -145,27 +151,16 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner * more efficient to close scanners which are not needed than keep them in * the heap. This is also required for certain optimizations. */ - if (pee == null || !mayContainMoreRows) { + if (pee == null || !moreCells) { this.current.close(); } else { this.heap.add(this.current); } this.current = pollRealKV(); - return (this.current != null); - } - - /** - * Gets the next row of keys from the top-most scanner. - *

      - * This method takes care of updating the heap. - *

      - * This can ONLY be called when you are using Scanners that implement - * InternalScanner as well as KeyValueScanner (a {@link StoreScanner}). - * @param result - * @return true if there are more keys, false if all scanners are done - */ - public boolean next(List result) throws IOException { - return next(result, -1); + if (this.current == null) { + moreCells = scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); + } + return moreCells; } protected static class KVScannerComparator implements Comparator { @@ -395,4 +390,10 @@ public class KeyValueHeap extends NonReversedNonLazyKeyValueScanner KeyValueScanner getCurrentForTesting() { return current; } + + @Override + public Cell getNextIndexedKey() { + // here we return the next index key from the top scanner + return current == null ? null : current.getNextIndexedKey(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java index 6eba20373ff..76a9d0fb544 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/KeyValueScanner.java @@ -156,4 +156,10 @@ public interface KeyValueScanner { * @throws IOException */ public boolean seekToLastRow() throws IOException; + + /** + * @return the next key in the index (the key to seek to the next block) + * if known, or null otherwise + */ + public Cell getNextIndexedKey(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java index 98f098573b2..0dfe3557dcd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java @@ -19,15 +19,19 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; /** - * Last flushed sequence Ids for the regions on region server + * Last flushed sequence Ids for the regions and their stores on region server */ @InterfaceAudience.Private public interface LastSequenceId { + /** * @param encodedRegionName Encoded region name - * @return Last flushed sequence Id for region or -1 if it can't be determined + * @return Last flushed sequence Id for region and its stores. 
Id will be -1 if it can't be + * determined */ - long getLastSequenceId(byte[] encodedRegionName); + RegionStoreSequenceIds getLastSequenceId(byte[] encodedRegionName); + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java index 71dea3b3667..de8ed8d7b5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java @@ -164,7 +164,7 @@ public class LogRoller extends HasThread { */ private void scheduleFlush(final byte [] encodedRegionName) { boolean scheduled = false; - HRegion r = this.services.getFromOnlineRegions(Bytes.toString(encodedRegionName)); + Region r = this.services.getFromOnlineRegions(Bytes.toString(encodedRegionName)); FlushRequester requester = null; if (r != null) { requester = this.services.getFlushRequester(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java index 7fa81fc9f21..364b9c9b639 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java @@ -59,6 +59,12 @@ public interface MemStore extends HeapSize { */ long getFlushableSize(); + /** + * Return the size of the snapshot(s) if any + * @return size of the memstore snapshot + */ + long getSnapshotSize(); + /** * Write an update * @param cell diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index e5ad5908cb8..485d30f90bd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -18,6 +18,8 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.apache.hadoop.util.StringUtils.humanReadableInt; + import java.io.IOException; import java.lang.Thread.UncaughtExceptionHandler; import java.lang.management.ManagementFactory; @@ -43,13 +45,17 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.DroppedSnapshotException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.io.util.HeapMemorySizeUtil; +import org.apache.hadoop.hbase.regionserver.Region.FlushResult; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Counter; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.HasThread; +import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; import org.apache.htrace.Trace; import org.apache.htrace.TraceScope; @@ -69,12 +75,13 @@ import com.google.common.base.Preconditions; class MemStoreFlusher implements FlushRequester { static final Log LOG = LogFactory.getLog(MemStoreFlusher.class); + private Configuration conf; // These two data members go together. Any entry in the one must have // a corresponding entry in the other. 
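  // [Editorial note, not part of the patch] regionsInQueue maps a Region to its pending
  // FlushRegionEntry while flushQueue holds the same entries in delay order; requestFlush() and
  // requestDelayedFlush() below only enqueue a region that is not already present, guarded by
  // synchronizing on regionsInQueue, so a region has at most one outstanding flush request.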
private final BlockingQueue flushQueue = new DelayQueue(); - private final Map regionsInQueue = - new HashMap(); + private final Map regionsInQueue = + new HashMap(); private AtomicBoolean wakeupPending = new AtomicBoolean(); private final long threadWakeFrequency; @@ -99,15 +106,16 @@ class MemStoreFlusher implements FlushRequester { public MemStoreFlusher(final Configuration conf, final HRegionServer server) { super(); + this.conf = conf; this.server = server; this.threadWakeFrequency = conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000); long max = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax(); float globalMemStorePercent = HeapMemorySizeUtil.getGlobalMemStorePercent(conf, true); this.globalMemStoreLimit = (long) (max * globalMemStorePercent); - this.globalMemStoreLimitLowMarkPercent = + this.globalMemStoreLimitLowMarkPercent = HeapMemorySizeUtil.getGlobalMemStoreLowerMark(conf, globalMemStorePercent); - this.globalMemStoreLimitLowMark = + this.globalMemStoreLimitLowMark = (long) (this.globalMemStoreLimit * this.globalMemStoreLimitLowMarkPercent); this.blockingWaitTime = conf.getInt("hbase.hstore.blockingWaitTime", @@ -132,39 +140,43 @@ class MemStoreFlusher implements FlushRequester { * @return true if successful */ private boolean flushOneForGlobalPressure() { - SortedMap regionsBySize = - server.getCopyOfOnlineRegionsSortedBySize(); + SortedMap regionsBySize = server.getCopyOfOnlineRegionsSortedBySize(); + Set excludedRegions = new HashSet(); - Set excludedRegions = new HashSet(); + double secondaryMultiplier + = ServerRegionReplicaUtil.getRegionReplicaStoreFileRefreshMultiplier(conf); boolean flushedOne = false; while (!flushedOne) { // Find the biggest region that doesn't have too many storefiles // (might be null!) - HRegion bestFlushableRegion = getBiggestMemstoreRegion( - regionsBySize, excludedRegions, true); + Region bestFlushableRegion = getBiggestMemstoreRegion(regionsBySize, excludedRegions, true); // Find the biggest region, total, even if it might have too many flushes. - HRegion bestAnyRegion = getBiggestMemstoreRegion( + Region bestAnyRegion = getBiggestMemstoreRegion( regionsBySize, excludedRegions, false); + // Find the biggest region that is a secondary region + Region bestRegionReplica = getBiggestMemstoreOfRegionReplica(regionsBySize, + excludedRegions); - if (bestAnyRegion == null) { + if (bestAnyRegion == null && bestRegionReplica == null) { LOG.error("Above memory mark but there are no flushable regions!"); return false; } - HRegion regionToFlush; + Region regionToFlush; if (bestFlushableRegion != null && - bestAnyRegion.memstoreSize.get() > 2 * bestFlushableRegion.memstoreSize.get()) { + bestAnyRegion.getMemstoreSize() > 2 * bestFlushableRegion.getMemstoreSize()) { // Even if it's not supposed to be flushed, pick a region if it's more than twice // as big as the best flushable one - otherwise when we're under pressure we make // lots of little flushes and cause lots of compactions, etc, which just makes // life worse! 
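      // [Editorial summary, not part of the patch] Selection under global memstore pressure, as
      // implemented in this method: flush the biggest flushable region, unless the biggest region
      // overall has more than twice its memstore size, in which case flush that one instead; and
      // when region replica store-file refresh is enabled and the biggest secondary replica holds
      // more than 'secondaryMultiplier' times the chosen region's memstore (or no flushable
      // primary candidate exists at all), reclaim memory by refreshing that replica's store files
      // rather than flushing.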
if (LOG.isDebugEnabled()) { LOG.debug("Under global heap pressure: " + "Region " - + bestAnyRegion.getRegionNameAsString() + " has too many " + "store files, but is " - + TraditionalBinaryPrefix.long2String(bestAnyRegion.memstoreSize.get(), "", 1) + + bestAnyRegion.getRegionInfo().getRegionNameAsString() + + " has too many " + "store files, but is " + + TraditionalBinaryPrefix.long2String(bestAnyRegion.getMemstoreSize(), "", 1) + " vs best flushable region's " - + TraditionalBinaryPrefix.long2String(bestFlushableRegion.memstoreSize.get(), "", 1) + + TraditionalBinaryPrefix.long2String(bestFlushableRegion.getMemstoreSize(), "", 1) + ". Choosing the bigger."); } regionToFlush = bestAnyRegion; @@ -176,14 +188,37 @@ class MemStoreFlusher implements FlushRequester { } } - Preconditions.checkState(regionToFlush.memstoreSize.get() > 0); + Preconditions.checkState( + (regionToFlush != null && regionToFlush.getMemstoreSize() > 0) || + (bestRegionReplica != null && bestRegionReplica.getMemstoreSize() > 0)); - LOG.info("Flush of region " + regionToFlush + " due to global heap pressure"); - flushedOne = flushRegion(regionToFlush, true, true); - if (!flushedOne) { - LOG.info("Excluding unflushable region " + regionToFlush + - " - trying to find a different region to flush."); - excludedRegions.add(regionToFlush); + if (regionToFlush == null || + (bestRegionReplica != null && + ServerRegionReplicaUtil.isRegionReplicaStoreFileRefreshEnabled(conf) && + (bestRegionReplica.getMemstoreSize() + > secondaryMultiplier * regionToFlush.getMemstoreSize()))) { + LOG.info("Refreshing storefiles of region " + regionToFlush + + " due to global heap pressure. memstore size=" + StringUtils.humanReadableInt( + server.getRegionServerAccounting().getGlobalMemstoreSize())); + flushedOne = refreshStoreFilesAndReclaimMemory(bestRegionReplica); + if (!flushedOne) { + LOG.info("Excluding secondary region " + regionToFlush + + " - trying to find a different region to refresh files."); + excludedRegions.add(bestRegionReplica); + } + } else { + LOG.info("Flush of region " + regionToFlush + " due to global heap pressure. 
" + + "Total Memstore size=" + + humanReadableInt(server.getRegionServerAccounting().getGlobalMemstoreSize()) + + ", Region memstore size=" + + humanReadableInt(regionToFlush.getMemstoreSize())); + flushedOne = flushRegion(regionToFlush, true, true); + + if (!flushedOne) { + LOG.info("Excluding unflushable region " + regionToFlush + + " - trying to find a different region to flush."); + excludedRegions.add(regionToFlush); + } } } return true; @@ -253,17 +288,18 @@ class MemStoreFlusher implements FlushRequester { } } - private HRegion getBiggestMemstoreRegion( - SortedMap regionsBySize, - Set excludedRegions, + private Region getBiggestMemstoreRegion( + SortedMap regionsBySize, + Set excludedRegions, boolean checkStoreFileCount) { synchronized (regionsInQueue) { - for (HRegion region : regionsBySize.values()) { + for (Region region : regionsBySize.values()) { if (excludedRegions.contains(region)) { continue; } - if (region.writestate.flushing || !region.writestate.writesEnabled) { + if (((HRegion)region).writestate.flushing || + !((HRegion)region).writestate.writesEnabled) { continue; } @@ -276,6 +312,33 @@ class MemStoreFlusher implements FlushRequester { return null; } + private Region getBiggestMemstoreOfRegionReplica(SortedMap regionsBySize, + Set excludedRegions) { + synchronized (regionsInQueue) { + for (Region region : regionsBySize.values()) { + if (excludedRegions.contains(region)) { + continue; + } + + if (RegionReplicaUtil.isDefaultReplica(region.getRegionInfo())) { + continue; + } + + return region; + } + } + return null; + } + + private boolean refreshStoreFilesAndReclaimMemory(Region region) { + try { + return region.refreshStoreFiles(); + } catch (IOException e) { + LOG.warn("Refreshing store files failed with exception", e); + } + return false; + } + /** * Return true if global memory usage is above the high watermark */ @@ -292,7 +355,8 @@ class MemStoreFlusher implements FlushRequester { getGlobalMemstoreSize() >= globalMemStoreLimitLowMark; } - public void requestFlush(HRegion r, boolean forceFlushAllStores) { + @Override + public void requestFlush(Region r, boolean forceFlushAllStores) { synchronized (regionsInQueue) { if (!regionsInQueue.containsKey(r)) { // This entry has no delay so it will be added at the top of the flush @@ -304,7 +368,8 @@ class MemStoreFlusher implements FlushRequester { } } - public void requestDelayedFlush(HRegion r, long delay, boolean forceFlushAllStores) { + @Override + public void requestDelayedFlush(Region r, long delay, boolean forceFlushAllStores) { synchronized (regionsInQueue) { if (!regionsInQueue.containsKey(r)) { // This entry has some delay @@ -366,23 +431,23 @@ class MemStoreFlusher implements FlushRequester { * on delay queue to retry later. * @param fqe * @return true if the region was successfully flushed, false otherwise. If - * false, there will be accompanying log messages explaining why the log was + * false, there will be accompanying log messages explaining why the region was * not flushed. */ private boolean flushRegion(final FlushRegionEntry fqe) { - HRegion region = fqe.region; + Region region = fqe.region; if (!region.getRegionInfo().isMetaRegion() && isTooManyStoreFiles(region)) { if (fqe.isMaximumWait(this.blockingWaitTime)) { LOG.info("Waited " + (EnvironmentEdgeManager.currentTime() - fqe.createTime) + "ms on a compaction to clean up 'too many store files'; waited " + "long enough... 
proceeding with flush of " + - region.getRegionNameAsString()); + region.getRegionInfo().getRegionNameAsString()); } else { // If this is first time we've been put off, then emit a log message. if (fqe.getRequeueCount() <= 0) { // Note: We don't impose blockingStoreFiles constraint on meta regions - LOG.warn("Region " + region.getRegionNameAsString() + " has too many " + + LOG.warn("Region " + region.getRegionInfo().getRegionNameAsString() + " has too many " + "store files; delaying flush up to " + this.blockingWaitTime + "ms"); if (!this.server.compactSplitThread.requestSplit(region)) { try { @@ -391,9 +456,8 @@ class MemStoreFlusher implements FlushRequester { } catch (IOException e) { e = e instanceof RemoteException ? ((RemoteException)e).unwrapRemoteException() : e; - LOG.error( - "Cache flush failed for region " + Bytes.toStringBinary(region.getRegionName()), - e); + LOG.error("Cache flush failed for region " + + Bytes.toStringBinary(region.getRegionInfo().getRegionName()), e); } } } @@ -417,10 +481,10 @@ class MemStoreFlusher implements FlushRequester { * poll on the flush queue (which removed it). * @param forceFlushAllStores whether we want to flush all store. * @return true if the region was successfully flushed, false otherwise. If - * false, there will be accompanying log messages explaining why the log was + * false, there will be accompanying log messages explaining why the region was * not flushed. */ - private boolean flushRegion(final HRegion region, final boolean emergencyFlush, + private boolean flushRegion(final Region region, final boolean emergencyFlush, boolean forceFlushAllStores) { long startTime = 0; synchronized (this.regionsInQueue) { @@ -444,10 +508,10 @@ class MemStoreFlusher implements FlushRequester { lock.readLock().lock(); try { notifyFlushRequest(region, emergencyFlush); - HRegion.FlushResult flushResult = region.flushcache(forceFlushAllStores); + FlushResult flushResult = region.flush(forceFlushAllStores); boolean shouldCompact = flushResult.isCompactionNeeded(); // We just want to check the size - boolean shouldSplit = region.checkSplit() != null; + boolean shouldSplit = ((HRegion)region).checkSplit() != null; if (shouldSplit) { this.server.compactSplitThread.requestSplit(region); } else if (shouldCompact) { @@ -470,8 +534,9 @@ class MemStoreFlusher implements FlushRequester { ex = ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex; LOG.error( "Cache flush failed" - + (region != null ? (" for region " + Bytes.toStringBinary(region.getRegionName())) - : ""), ex); + + (region != null ? (" for region " + + Bytes.toStringBinary(region.getRegionInfo().getRegionName())) + : ""), ex); if (!server.checkFileSystem()) { return false; } @@ -482,7 +547,7 @@ class MemStoreFlusher implements FlushRequester { return true; } - private void notifyFlushRequest(HRegion region, boolean emergencyFlush) { + private void notifyFlushRequest(Region region, boolean emergencyFlush) { FlushType type = FlushType.NORMAL; if (emergencyFlush) { type = isAboveHighWaterMark() ? 
FlushType.ABOVE_HIGHER_MARK : FlushType.ABOVE_LOWER_MARK; @@ -498,8 +563,8 @@ class MemStoreFlusher implements FlushRequester { } } - private boolean isTooManyStoreFiles(HRegion region) { - for (Store store : region.stores.values()) { + private boolean isTooManyStoreFiles(Region region) { + for (Store store : region.getStores()) { if (store.hasTooManyStoreFiles()) { return true; } @@ -591,6 +656,7 @@ class MemStoreFlusher implements FlushRequester { * Register a MemstoreFlushListener * @param listener */ + @Override public void registerFlushRequestListener(final FlushRequestListener listener) { this.flushRequestListeners.add(listener); } @@ -600,6 +666,7 @@ class MemStoreFlusher implements FlushRequester { * @param listener * @return true when passed listener is unregistered successfully. */ + @Override public boolean unregisterFlushRequestListener(final FlushRequestListener listener) { return this.flushRequestListeners.remove(listener); } @@ -608,9 +675,10 @@ class MemStoreFlusher implements FlushRequester { * Sets the global memstore limit to a new size. * @param globalMemStoreSize */ + @Override public void setGlobalMemstoreLimit(long globalMemStoreSize) { this.globalMemStoreLimit = globalMemStoreSize; - this.globalMemStoreLimitLowMark = + this.globalMemStoreLimitLowMark = (long) (this.globalMemStoreLimitLowMarkPercent * globalMemStoreSize); reclaimMemStoreMemory(); } @@ -651,7 +719,7 @@ class MemStoreFlusher implements FlushRequester { * a while. */ static class FlushRegionEntry implements FlushQueueEntry { - private final HRegion region; + private final Region region; private final long createTime; private long whenToExpire; @@ -659,7 +727,7 @@ class MemStoreFlusher implements FlushRequester { private boolean forceFlushAllStores; - FlushRegionEntry(final HRegion r, boolean forceFlushAllStores) { + FlushRegionEntry(final Region r, boolean forceFlushAllStores) { this.region = r; this.createTime = EnvironmentEdgeManager.currentTime(); this.whenToExpire = this.createTime; @@ -721,7 +789,7 @@ class MemStoreFlusher implements FlushRequester { @Override public String toString() { - return "[flush region " + Bytes.toStringBinary(region.getRegionName()) + "]"; + return "[flush region "+Bytes.toStringBinary(region.getRegionInfo().getRegionName())+"]"; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java index c3bf97e0708..9f98ba6b43e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java @@ -95,6 +95,10 @@ public class MetricsRegionServer { serverSource.updateReplay(t); } + public void updateScannerNext(long scanSize){ + serverSource.updateScannerNext(scanSize); + } + public void updateSplitTime(long t) { serverSource.updateSplitTime(t); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java index 159ec55787c..ea9558fd868 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import 
java.util.Collection; +import java.util.List; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -29,6 +30,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; +import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -73,6 +75,7 @@ class MetricsRegionServerWrapperImpl private volatile long numMutationsWithoutWAL = 0; private volatile long dataInMemoryWithoutWAL = 0; private volatile int percentFileLocal = 0; + private volatile int percentFileLocalSecondaryRegions = 0; private volatile long flushedCellsCount = 0; private volatile long compactedCellsCount = 0; private volatile long majorCompactedCellsCount = 0; @@ -194,7 +197,7 @@ class MetricsRegionServerWrapperImpl @Override public long getNumOnlineRegions() { - Collection onlineRegionsLocalContext = regionServer.getOnlineRegionsLocalContext(); + Collection onlineRegionsLocalContext = regionServer.getOnlineRegionsLocalContext(); if (onlineRegionsLocalContext == null) { return 0; } @@ -402,6 +405,11 @@ class MetricsRegionServerWrapperImpl return percentFileLocal; } + @Override + public int getPercentFileLocalSecondaryRegions() { + return percentFileLocalSecondaryRegions; + } + @Override public long getUpdatesBlockedTime() { if (this.regionServer.cacheFlusher == null) { @@ -528,6 +536,8 @@ class MetricsRegionServerWrapperImpl HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution(); + HDFSBlocksDistribution hdfsBlocksDistributionSecondaryRegions = + new HDFSBlocksDistribution(); long tempNumStores = 0; long tempNumStoreFiles = 0; @@ -543,6 +553,7 @@ class MetricsRegionServerWrapperImpl long tempNumMutationsWithoutWAL = 0; long tempDataInMemoryWithoutWAL = 0; int tempPercentFileLocal = 0; + int tempPercentFileLocalSecondaryRegions = 0; long tempFlushedCellsCount = 0; long tempCompactedCellsCount = 0; long tempMajorCompactedCellsCount = 0; @@ -560,16 +571,17 @@ class MetricsRegionServerWrapperImpl long tempMobScanCellsSize = 0; long tempBlockedRequestsCount = 0L; - for (HRegion r : regionServer.getOnlineRegionsLocalContext()) { - tempNumMutationsWithoutWAL += r.numMutationsWithoutWAL.get(); - tempDataInMemoryWithoutWAL += r.dataInMemoryWithoutWAL.get(); - tempReadRequestsCount += r.readRequestsCount.get(); - tempWriteRequestsCount += r.writeRequestsCount.get(); - tempCheckAndMutateChecksFailed += r.checkAndMutateChecksFailed.get(); - tempCheckAndMutateChecksPassed += r.checkAndMutateChecksPassed.get(); + for (Region r : regionServer.getOnlineRegionsLocalContext()) { + tempNumMutationsWithoutWAL += r.getNumMutationsWithoutWAL(); + tempDataInMemoryWithoutWAL += r.getDataInMemoryWithoutWAL(); + tempReadRequestsCount += r.getReadRequestsCount(); + tempWriteRequestsCount += r.getWriteRequestsCount(); + tempCheckAndMutateChecksFailed += r.getCheckAndMutateChecksFailed(); + tempCheckAndMutateChecksPassed += r.getCheckAndMutateChecksPassed(); tempBlockedRequestsCount += r.getBlockedRequestsCount(); - tempNumStores += r.stores.size(); - for (Store store : r.stores.values()) { + List storeList = r.getStores(); + tempNumStores += storeList.size(); + for (Store store : storeList) { tempNumStoreFiles += store.getStorefilesCount(); tempMemstoreSize += store.getMemStoreSize(); 
tempStoreFileSize += store.getStorefilesSize(); @@ -596,13 +608,20 @@ class MetricsRegionServerWrapperImpl } } - hdfsBlocksDistribution.add(r.getHDFSBlocksDistribution()); + HDFSBlocksDistribution distro = r.getHDFSBlocksDistribution(); + hdfsBlocksDistribution.add(distro); + if (r.getRegionInfo().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { + hdfsBlocksDistributionSecondaryRegions.add(distro); + } } float localityIndex = hdfsBlocksDistribution.getBlockLocalityIndex( regionServer.getServerName().getHostname()); tempPercentFileLocal = (int) (localityIndex * 100); + float localityIndexSecondaryRegions = hdfsBlocksDistributionSecondaryRegions + .getBlockLocalityIndex(regionServer.getServerName().getHostname()); + tempPercentFileLocalSecondaryRegions = (int) (localityIndexSecondaryRegions * 100); //Compute the number of requests per second long currentTime = EnvironmentEdgeManager.currentTime(); @@ -640,6 +659,7 @@ class MetricsRegionServerWrapperImpl numMutationsWithoutWAL = tempNumMutationsWithoutWAL; dataInMemoryWithoutWAL = tempDataInMemoryWithoutWAL; percentFileLocal = tempPercentFileLocal; + percentFileLocalSecondaryRegions = tempPercentFileLocalSecondaryRegions; flushedCellsCount = tempFlushedCellsCount; compactedCellsCount = tempCompactedCellsCount; majorCompactedCellsCount = tempMajorCompactedCellsCount; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java index f7f0acd5001..5739df1b590 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java @@ -54,8 +54,8 @@ public class MobStoreScanner extends StoreScanner { * from the mob file as the result. */ @Override - public boolean next(List outResult, int limit) throws IOException { - boolean result = super.next(outResult, limit); + public boolean next(List outResult, ScannerContext ctx) throws IOException { + boolean result = super.next(outResult, ctx); if (!MobUtils.isRawMobScan(scan)) { // retrieve the mob data if (outResult.isEmpty()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NoLimitScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NoLimitScannerContext.java new file mode 100644 index 00000000000..1484e8072e9 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NoLimitScannerContext.java @@ -0,0 +1,102 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * This is a special {@link ScannerContext} subclass that is designed to be used globally when + * limits should not be enforced during invocations of {@link InternalScanner#next(java.util.List)} + * or {@link RegionScanner#next(java.util.List)}. + *

      + * Instances of {@link NoLimitScannerContext} are immutable after construction. Any attempt to + * change the limits or progress of a {@link NoLimitScannerContext} will fail silently. The net + * effect is that all limit checks will return false, thus indicating that a limit has not been + * reached. + */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving +public class NoLimitScannerContext extends ScannerContext { + + public NoLimitScannerContext() { + super(false, null); + } + + /** + * Use this instance whenever limits do not need to be enforced. + */ + private static final ScannerContext NO_LIMIT = new NoLimitScannerContext(); + + /** + * @return The static, immutable instance of {@link NoLimitScannerContext} to be used whenever + * limits should not be enforced + */ + public static final ScannerContext getInstance() { + return NO_LIMIT; + } + + @Override + void setKeepProgress(boolean keepProgress) { + // Do nothing. NoLimitScannerContext instances are immutable post-construction + } + + @Override + void setBatchProgress(int batchProgress) { + // Do nothing. NoLimitScannerContext instances are immutable post-construction + } + + @Override + void setSizeProgress(long sizeProgress) { + // Do nothing. NoLimitScannerContext instances are immutable post-construction + } + + @Override + void setProgress(int batchProgress, long sizeProgress) { + // Do nothing. NoLimitScannerContext instances are immutable post-construction + } + + @Override + void setSizeLimitScope(LimitScope scope) { + // Do nothing. NoLimitScannerContext instances are immutable post-construction + } + + @Override + NextState setScannerState(NextState state) { + // Do nothing. NoLimitScannerContext instances are immutable post-construction + return state; + } + + @Override + boolean checkBatchLimit(LimitScope checkerScope) { + // No limits can be specified, thus return false to indicate no limit has been reached. + return false; + } + + @Override + boolean checkSizeLimit(LimitScope checkerScope) { + // No limits can be specified, thus return false to indicate no limit has been reached. + return false; + } + + @Override + boolean checkAnyLimitReached(LimitScope checkerScope) { + return false; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java index 9dc46cef901..957f4174392 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonLazyKeyValueScanner.java @@ -67,4 +67,8 @@ public abstract class NonLazyKeyValueScanner implements KeyValueScanner { // Not a file by default. 
return false; } + @Override + public Cell getNextIndexedKey() { + return null; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java index 1cde7e394b9..60fc9fb8ade 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java @@ -22,46 +22,49 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; /** * Interface to Map of online regions. In the Map, the key is the region's - * encoded name and the value is an {@link HRegion} instance. + * encoded name and the value is an {@link Region} instance. */ -@InterfaceAudience.Private -interface OnlineRegions extends Server { +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving +public interface OnlineRegions extends Server { /** * Add to online regions. * @param r */ - void addToOnlineRegions(final HRegion r); + void addToOnlineRegions(final Region r); /** - * This method removes HRegion corresponding to hri from the Map of onlineRegions. + * This method removes Region corresponding to hri from the Map of onlineRegions. * * @param r Region to remove. * @param destination Destination, if any, null otherwise. * @return True if we removed a region from online list. */ - boolean removeFromOnlineRegions(final HRegion r, ServerName destination); + boolean removeFromOnlineRegions(final Region r, ServerName destination); /** - * Return {@link HRegion} instance. - * Only works if caller is in same context, in same JVM. HRegion is not + * Return {@link Region} instance. + * Only works if caller is in same context, in same JVM. Region is not * serializable. * @param encodedRegionName - * @return HRegion for the passed encoded encodedRegionName or + * @return Region for the passed encoded encodedRegionName or * null if named region is not member of the online regions. */ - HRegion getFromOnlineRegions(String encodedRegionName); + Region getFromOnlineRegions(String encodedRegionName); /** * Get all online regions of a table in this RS. 
* @param tableName - * @return List of HRegion + * @return List of Region * @throws java.io.IOException */ - List getOnlineRegions(TableName tableName) throws IOException; + List getOnlineRegions(TableName tableName) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 3653cfb9274..15bf2cb3af1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -81,6 +81,7 @@ import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.ipc.ServerRpcController; +import org.apache.hadoop.hbase.master.MasterRpcServices; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.ResponseConverter; @@ -118,6 +119,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfiguratio import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath; @@ -143,11 +146,16 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor; import org.apache.hadoop.hbase.quotas.OperationQuota; import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager; -import org.apache.hadoop.hbase.regionserver.HRegion.Operation; import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException; +import org.apache.hadoop.hbase.regionserver.Region.FlushResult; +import org.apache.hadoop.hbase.regionserver.Region.Operation; +import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler; import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; @@ -157,6 +165,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; import org.apache.hadoop.hbase.util.Strings; +import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALKey; import 
org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; @@ -209,9 +218,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler, private static class RegionScannerHolder { private RegionScanner s; private long nextCallSeq = 0L; - private HRegion r; + private Region r; - public RegionScannerHolder(RegionScanner s, HRegion r) { + public RegionScannerHolder(RegionScanner s, Region r) { this.s = s; this.r = r; } @@ -236,7 +245,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, LOG.info("Scanner " + this.scannerName + " lease expired on region " + s.getRegionInfo().getRegionNameAsString()); try { - HRegion region = regionServer.getRegion(s.getRegionInfo().getRegionName()); + Region region = regionServer.getRegion(s.getRegionInfo().getRegionName()); if (region != null && region.getCoprocessorHost() != null) { region.getCoprocessorHost().preScannerClose(s); } @@ -336,6 +345,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (isClientCellBlockSupport()) { for (Result res : results) { builder.addCellsPerResult(res.size()); + builder.addPartialFlagPerResult(res.isPartial()); } ((PayloadCarryingRpcController)controller). setCellScanner(CellUtil.createCellScanner(results)); @@ -355,7 +365,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, * @param cellScanner if non-null, the mutation data -- the Cell content. * @throws IOException */ - private ClientProtos.RegionLoadStats mutateRows(final HRegion region, + private ClientProtos.RegionLoadStats mutateRows(final Region region, final List actions, final CellScanner cellScanner) throws IOException { if (!region.getRegionInfo().isMetaTable()) { @@ -383,7 +393,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } } region.mutateRow(rm); - return region.getRegionStats(); + return ((HRegion)region).getRegionStats(); } /** @@ -398,7 +408,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, * @param compareOp * @param comparator @throws IOException */ - private boolean checkAndRowMutate(final HRegion region, final List actions, + private boolean checkAndRowMutate(final Region region, final List actions, final CellScanner cellScanner, byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, ByteArrayComparable comparator) throws IOException { if (!region.getRegionInfo().isMetaTable()) { @@ -438,7 +448,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, * bypassed as indicated by RegionObserver, null otherwise * @throws IOException */ - private Result append(final HRegion region, final OperationQuota quota, final MutationProto m, + private Result append(final Region region, final OperationQuota quota, final MutationProto m, final CellScanner cellScanner, long nonceGroup) throws IOException { long before = EnvironmentEdgeManager.currentTime(); Append append = ProtobufUtil.toAppend(m, cellScanner); @@ -475,7 +485,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, * @return the Result * @throws IOException */ - private Result increment(final HRegion region, final OperationQuota quota, + private Result increment(final Region region, final OperationQuota quota, final MutationProto mutation, final CellScanner cells, long nonceGroup) throws IOException { long before = EnvironmentEdgeManager.currentTime(); @@ -516,7 +526,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, * method returns as a 'result'. 
* @return Return the cellScanner passed */ - private List doNonAtomicRegionMutation(final HRegion region, + private List doNonAtomicRegionMutation(final Region region, final OperationQuota quota, final RegionAction actions, final CellScanner cellScanner, final RegionActionResult.Builder builder, List cellsToReturn, long nonceGroup) { // Gather up CONTIGUOUS Puts and Deletes in this mutations List. Idea is that rather than do @@ -615,7 +625,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, * @param region * @param mutations */ - private void doBatchOp(final RegionActionResult.Builder builder, final HRegion region, + private void doBatchOp(final RegionActionResult.Builder builder, final Region region, final OperationQuota quota, final List mutations, final CellScanner cells) { Mutation[] mArray = new Mutation[mutations.size()]; @@ -641,7 +651,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler, regionServer.cacheFlusher.reclaimMemStoreMemory(); } - OperationStatus codes[] = region.batchMutate(mArray); + OperationStatus codes[] = region.batchMutate(mArray, HConstants.NO_NONCE, + HConstants.NO_NONCE); for (i = 0; i < codes.length; i++) { int index = mutations.get(i).getIndex(); Exception e = null; @@ -663,7 +674,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler, case SUCCESS: builder.addResultOrException(getResultOrException( - ClientProtos.Result.getDefaultInstance(), index, region.getRegionStats())); + ClientProtos.Result.getDefaultInstance(), index, + ((HRegion)region).getRegionStats())); break; } } @@ -693,7 +705,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, * exceptionMessage if any * @throws IOException */ - private OperationStatus [] doReplayBatchOp(final HRegion region, + private OperationStatus [] doReplayBatchOp(final Region region, final List mutations, long replaySeqId) throws IOException { long before = EnvironmentEdgeManager.currentTime(); boolean batchContainsPuts = false, batchContainsDelete = false; @@ -712,8 +724,29 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (metaCells != null && !metaCells.isEmpty()) { for (Cell metaCell : metaCells) { CompactionDescriptor compactionDesc = WALEdit.getCompaction(metaCell); + boolean isDefaultReplica = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()); + HRegion hRegion = (HRegion)region; if (compactionDesc != null) { - region.completeCompactionMarker(compactionDesc); + // replay the compaction. Remove the files from stores only if we are the primary + // region replica (thus own the files) + hRegion.replayWALCompactionMarker(compactionDesc, !isDefaultReplica, isDefaultReplica, + replaySeqId); + continue; + } + FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(metaCell); + if (flushDesc != null && !isDefaultReplica) { + hRegion.replayWALFlushMarker(flushDesc, replaySeqId); + continue; + } + RegionEventDescriptor regionEvent = WALEdit.getRegionEventDescriptor(metaCell); + if (regionEvent != null && !isDefaultReplica) { + hRegion.replayWALRegionEventMarker(regionEvent); + continue; + } + BulkLoadDescriptor bulkLoadEvent = WALEdit.getBulkLoadDescriptor(metaCell); + if (bulkLoadEvent != null) { + hRegion.replayWALBulkLoadEventMarker(bulkLoadEvent); + continue; } } it.remove(); @@ -766,19 +799,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } // Server to handle client requests. 
String hostname = getHostname(rs.conf); - - boolean mode = - rs.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, HConstants.DEFAULT_CLUSTER_DISTRIBUTED); - if (mode == HConstants.CLUSTER_IS_DISTRIBUTED && hostname.equals(HConstants.LOCALHOST)) { - String msg = - "The hostname of regionserver cannot be set to localhost " - + "in a fully-distributed setup because it won't be reachable. " - + "See \"Getting Started\" for more information."; - LOG.fatal(msg); - throw new IOException(msg); - } int port = rs.conf.getInt(HConstants.REGIONSERVER_PORT, HConstants.DEFAULT_REGIONSERVER_PORT); + if(this instanceof MasterRpcServices) { + port = rs.conf.getInt(HConstants.MASTER_PORT, + HConstants.DEFAULT_MASTER_PORT); + } // Creation of a HSA will force a resolve. InetSocketAddress initialIsa = new InetSocketAddress(hostname, port); InetSocketAddress bindAddress = new InetSocketAddress( @@ -799,8 +825,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler, HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD); maxScannerResultSize = rs.conf.getLong( - HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, - HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); + HConstants.HBASE_SERVER_SCANNER_MAX_RESULT_SIZE_KEY, + HConstants.DEFAULT_HBASE_SERVER_SCANNER_MAX_RESULT_SIZE); // Set our address, however we need the final port that was given to rpcServer isa = new InetSocketAddress(initialIsa.getHostName(), rpcServer.getListenerAddress().getPort()); @@ -809,9 +835,15 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } public static String getHostname(Configuration conf) throws UnknownHostException { - return Strings.domainNamePointerToHostName(DNS.getDefaultHost( - conf.get("hbase.regionserver.dns.interface", "default"), - conf.get("hbase.regionserver.dns.nameserver", "default"))); + String hostname = conf.get(HRegionServer.HOSTNAME_KEY); + if (hostname == null || hostname.isEmpty()) { + return Strings.domainNamePointerToHostName(DNS.getDefaultHost( + conf.get("hbase.regionserver.dns.interface", "default"), + conf.get("hbase.regionserver.dns.nameserver", "default"))); + } else { + LOG.info("hostname is configured to be " + hostname); + return hostname; + } } RegionScanner getScanner(long scannerId) { @@ -836,7 +868,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, return 0L; } - long addScanner(RegionScanner s, HRegion r) throws LeaseStillHeldException { + long addScanner(RegionScanner s, Region r) throws LeaseStillHeldException { long scannerId = this.scannerIdGen.incrementAndGet(); String scannerName = String.valueOf(scannerId); @@ -857,7 +889,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, * @throws IOException if the specifier is not null, * but failed to find the region */ - HRegion getRegion( + Region getRegion( final RegionSpecifier regionSpecifier) throws IOException { return regionServer.getRegionByEncodedName(regionSpecifier.getValue().toByteArray(), ProtobufUtil.getRegionEncodedName(regionSpecifier)); @@ -990,7 +1022,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, final String encodedRegionName = ProtobufUtil.getRegionEncodedName(request.getRegion()); // Can be null if we're calling close on a region that's not online - final HRegion region = regionServer.getFromOnlineRegions(encodedRegionName); + final Region region = regionServer.getFromOnlineRegions(encodedRegionName); if ((region != null) && (region .getCoprocessorHost() != null)) { 
region.getCoprocessorHost().preClose(false); } @@ -1019,9 +1051,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler, try { checkOpen(); requestCount.increment(); - HRegion region = getRegion(request.getRegion()); + Region region = getRegion(request.getRegion()); region.startRegionOperation(Operation.COMPACT_REGION); - LOG.info("Compacting " + region.getRegionNameAsString()); + LOG.info("Compacting " + region.getRegionInfo().getRegionNameAsString()); boolean major = false; byte [] family = null; Store store = null; @@ -1030,7 +1062,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, store = region.getStore(family); if (store == null) { throw new ServiceException(new IOException("column family " + Bytes.toString(family) - + " does not exist in region " + region.getRegionNameAsString())); + + " does not exist in region " + region.getRegionInfo().getRegionNameAsString())); } } if (request.hasMajor()) { @@ -1047,7 +1079,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, String familyLogMsg = (family != null)?" for column family: " + Bytes.toString(family):""; if (LOG.isTraceEnabled()) { LOG.trace("User-triggered compaction requested for region " - + region.getRegionNameAsString() + familyLogMsg); + + region.getRegionInfo().getRegionNameAsString() + familyLogMsg); } String log = "User-triggered " + (major ? "major " : "") + "compaction" + familyLogMsg; if(family != null) { @@ -1077,28 +1109,33 @@ public class RSRpcServices implements HBaseRPCErrorHandler, try { checkOpen(); requestCount.increment(); - HRegion region = getRegion(request.getRegion()); - LOG.info("Flushing " + region.getRegionNameAsString()); + Region region = getRegion(request.getRegion()); + LOG.info("Flushing " + region.getRegionInfo().getRegionNameAsString()); boolean shouldFlush = true; if (request.hasIfOlderThanTs()) { shouldFlush = region.getEarliestFlushTimeForAllStores() < request.getIfOlderThanTs(); } FlushRegionResponse.Builder builder = FlushRegionResponse.newBuilder(); if (shouldFlush) { + boolean writeFlushWalMarker = request.hasWriteFlushWalMarker() ? + request.getWriteFlushWalMarker() : false; long startTime = EnvironmentEdgeManager.currentTime(); - HRegion.FlushResult flushResult = region.flushcache(); + // Go behind the curtain so we can manage writing of the flush WAL marker + HRegion.FlushResultImpl flushResult = (HRegion.FlushResultImpl) + ((HRegion)region).flushcache(true, writeFlushWalMarker); if (flushResult.isFlushSucceeded()) { long endTime = EnvironmentEdgeManager.currentTime(); regionServer.metricsRegionServer.updateFlushTime(endTime - startTime); } - boolean result = flushResult.isCompactionNeeded(); - if (result) { + boolean compactionNeeded = flushResult.isCompactionNeeded(); + if (compactionNeeded) { regionServer.compactSplitThread.requestSystemCompaction(region, "Compaction through user triggered flush"); } - builder.setFlushed(result); + builder.setFlushed(flushResult.isFlushSucceeded()); + builder.setWroteFlushWalMarker(flushResult.wroteFlushWalMarker); } - builder.setLastFlushTime( region.getEarliestFlushTimeForAllStores()); + builder.setLastFlushTime(region.getEarliestFlushTimeForAllStores()); return builder.build(); } catch (DroppedSnapshotException ex) { // Cache flush can fail in a few places. 
If it fails in a critical @@ -1119,9 +1156,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler, try { checkOpen(); requestCount.increment(); - Map onlineRegions = regionServer.onlineRegions; + Map onlineRegions = regionServer.onlineRegions; List list = new ArrayList(onlineRegions.size()); - for (HRegion region: onlineRegions.values()) { + for (Region region: onlineRegions.values()) { list.add(region.getRegionInfo()); } Collections.sort(list); @@ -1138,7 +1175,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, try { checkOpen(); requestCount.increment(); - HRegion region = getRegion(request.getRegion()); + Region region = getRegion(request.getRegion()); HRegionInfo info = region.getRegionInfo(); GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder(); builder.setRegionInfo(HRegionInfo.convert(info)); @@ -1179,11 +1216,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler, final GetStoreFileRequest request) throws ServiceException { try { checkOpen(); - HRegion region = getRegion(request.getRegion()); + Region region = getRegion(request.getRegion()); requestCount.increment(); Set columnFamilies; if (request.getFamilyCount() == 0) { - columnFamilies = region.getStores().keySet(); + columnFamilies = region.getTableDesc().getFamiliesKeys(); } else { columnFamilies = new TreeSet(Bytes.BYTES_RAWCOMPARATOR); for (ByteString cf: request.getFamilyList()) { @@ -1216,8 +1253,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler, try { checkOpen(); requestCount.increment(); - HRegion regionA = getRegion(request.getRegionA()); - HRegion regionB = getRegion(request.getRegionB()); + Region regionA = getRegion(request.getRegionA()); + Region regionB = getRegion(request.getRegionB()); boolean forcible = request.getForcible(); regionA.startRegionOperation(Operation.MERGE_REGION); regionB.startRegionOperation(Operation.MERGE_REGION); @@ -1228,13 +1265,13 @@ public class RSRpcServices implements HBaseRPCErrorHandler, LOG.info("Receiving merging request for " + regionA + ", " + regionB + ",forcible=" + forcible); long startTime = EnvironmentEdgeManager.currentTime(); - HRegion.FlushResult flushResult = regionA.flushcache(); + FlushResult flushResult = regionA.flush(true); if (flushResult.isFlushSucceeded()) { long endTime = EnvironmentEdgeManager.currentTime(); regionServer.metricsRegionServer.updateFlushTime(endTime - startTime); } startTime = EnvironmentEdgeManager.currentTime(); - flushResult = regionB.flushcache(); + flushResult = regionB.flush(true); if (flushResult.isFlushSucceeded()) { long endTime = EnvironmentEdgeManager.currentTime(); regionServer.metricsRegionServer.updateFlushTime(endTime - startTime); @@ -1327,7 +1364,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, try { String encodedName = region.getEncodedName(); byte[] encodedNameBytes = region.getEncodedNameAsBytes(); - final HRegion onlineRegion = regionServer.getFromOnlineRegions(encodedName); + final Region onlineRegion = regionServer.getFromOnlineRegions(encodedName); if (onlineRegion != null) { // The region is already online. This should not happen any more. String error = "Received OPEN for the region:" @@ -1415,6 +1452,57 @@ public class RSRpcServices implements HBaseRPCErrorHandler, return builder.build(); } + /** + * Wamrmup a region on this server. + * + * This method should only be called by Master. It synchrnously opens the region and + * closes the region bringing the most important pages in cache. + *

      + * + * @param controller the RPC controller + * @param request the request + * @throws ServiceException + */ + public WarmupRegionResponse warmupRegion(final RpcController controller, + final WarmupRegionRequest request) throws ServiceException { + + RegionInfo regionInfo = request.getRegionInfo(); + final HRegionInfo region = HRegionInfo.convert(regionInfo); + HTableDescriptor htd; + WarmupRegionResponse response = WarmupRegionResponse.getDefaultInstance(); + + try { + String encodedName = region.getEncodedName(); + byte[] encodedNameBytes = region.getEncodedNameAsBytes(); + final Region onlineRegion = regionServer.getFromOnlineRegions(encodedName); + + if (onlineRegion != null) { + LOG.info("Region already online. Skipping warming up " + region); + return response; + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Warming up Region " + region.getRegionNameAsString()); + } + + htd = regionServer.tableDescriptors.get(region.getTable()); + + if (regionServer.getRegionsInTransitionInRS().containsKey(encodedNameBytes)) { + LOG.info("Region is in transition. Skipping warmup " + region); + return response; + } + + HRegion.warmupHRegion(region, htd, regionServer.getWAL(region), + regionServer.getConfiguration(), regionServer, null); + + } catch (IOException ie) { + LOG.error("Failed warming up region " + region.getRegionNameAsString(), ie); + throw new ServiceException(ie); + } + + return response; + } + /** * Replay the given changes when distributedLogReplay WAL edits from a failed RS. The guarantee is * that the given mutations will be durable on the receiving RS if this method returns without any @@ -1437,7 +1525,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, return ReplicateWALEntryResponse.newBuilder().build(); } ByteString regionName = entries.get(0).getKey().getEncodedRegionName(); - HRegion region = regionServer.getRegionByEncodedName(regionName.toStringUtf8()); + Region region = regionServer.getRegionByEncodedName(regionName.toStringUtf8()); RegionCoprocessorHost coprocessorHost = ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo()) ? region.getCoprocessorHost() @@ -1454,7 +1542,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, "regions. First region:" + regionName.toStringUtf8() + " , other region:" + entry.getKey().getEncodedRegionName()); } - if (regionServer.nonceManager != null) { + if (regionServer.nonceManager != null && isPrimary) { long nonceGroup = entry.getKey().hasNonceGroup() ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE; long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE; @@ -1488,12 +1576,15 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } //sync wal at the end because ASYNC_WAL is used above - region.syncWal(); + WAL wal = getWAL(region); + if (wal != null) { + wal.sync(); + } if (coprocessorHost != null) { - for (Pair wal : walEntries) { - coprocessorHost.postWALRestore(region.getRegionInfo(), wal.getFirst(), - wal.getSecond()); + for (Pair entry : walEntries) { + coprocessorHost.postWALRestore(region.getRegionInfo(), entry.getFirst(), + entry.getSecond()); } } return ReplicateWALEntryResponse.newBuilder().build(); @@ -1507,6 +1598,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } } + WAL getWAL(Region region) { + return ((HRegion)region).getWAL(); + } + /** * Replicate WAL entries on the region server. 
* @@ -1570,15 +1665,15 @@ public class RSRpcServices implements HBaseRPCErrorHandler, try { checkOpen(); requestCount.increment(); - HRegion region = getRegion(request.getRegion()); + Region region = getRegion(request.getRegion()); region.startRegionOperation(Operation.SPLIT_REGION); if (region.getRegionInfo().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { throw new IOException("Can't split replicas directly. " + "Replicas are auto-split when their primary is split."); } - LOG.info("Splitting " + region.getRegionNameAsString()); + LOG.info("Splitting " + region.getRegionInfo().getRegionNameAsString()); long startTime = EnvironmentEdgeManager.currentTime(); - HRegion.FlushResult flushResult = region.flushcache(); + FlushResult flushResult = region.flush(true); if (flushResult.isFlushSucceeded()) { long endTime = EnvironmentEdgeManager.currentTime(); regionServer.metricsRegionServer.updateFlushTime(endTime - startTime); @@ -1587,8 +1682,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (request.hasSplitPoint()) { splitPoint = request.getSplitPoint().toByteArray(); } - region.forceSplit(splitPoint); - regionServer.compactSplitThread.requestSplit(region, region.checkSplit()); + ((HRegion)region).forceSplit(splitPoint); + regionServer.compactSplitThread.requestSplit(region, ((HRegion)region).checkSplit()); return SplitRegionResponse.newBuilder().build(); } catch (IOException ie) { throw new ServiceException(ie); @@ -1637,7 +1732,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, try { checkOpen(); requestCount.increment(); - HRegion region = getRegion(request.getRegion()); + Region region = getRegion(request.getRegion()); List> familyPaths = new ArrayList>(); for (FamilyPath familyPath: request.getFamilyPathList()) { familyPaths.add(new Pair(familyPath.getFamily().toByteArray(), @@ -1649,7 +1744,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } boolean loaded = false; if (!bypass) { - loaded = region.bulkLoadHFiles(familyPaths, request.getAssignSeqNum()); + loaded = region.bulkLoadHFiles(familyPaths, request.getAssignSeqNum(), null); } if (region.getCoprocessorHost() != null) { loaded = region.getCoprocessorHost().postBulkLoadHFile(familyPaths, loaded); @@ -1668,12 +1763,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler, try { checkOpen(); requestCount.increment(); - HRegion region = getRegion(request.getRegion()); + Region region = getRegion(request.getRegion()); Message result = execServiceOnRegion(region, request.getCall()); CoprocessorServiceResponse.Builder builder = CoprocessorServiceResponse.newBuilder(); builder.setRegion(RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, region.getRegionName())); + RegionSpecifierType.REGION_NAME, region.getRegionInfo().getRegionName())); builder.setValue( builder.getValueBuilder().setName(result.getClass().getName()) .setValue(result.toByteString())); @@ -1683,7 +1778,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } } - private Message execServiceOnRegion(HRegion region, + private Message execServiceOnRegion(Region region, final ClientProtos.CoprocessorServiceCall serviceCall) throws IOException { // ignore the passed in controller (from the serialized call) ServerRpcController execController = new ServerRpcController(); @@ -1709,7 +1804,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, try { checkOpen(); requestCount.increment(); - HRegion region = getRegion(request.getRegion()); + Region region = 
getRegion(request.getRegion()); GetResponse.Builder builder = GetResponse.newBuilder(); ClientProtos.Get get = request.getGet(); @@ -1801,7 +1896,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, for (RegionAction regionAction : request.getRegionActionList()) { this.requestCount.add(regionAction.getActionCount()); OperationQuota quota; - HRegion region; + Region region; regionActionResultBuilder.clear(); try { region = getRegion(regionAction.getRegion()); @@ -1876,14 +1971,13 @@ public class RSRpcServices implements HBaseRPCErrorHandler, try { checkOpen(); requestCount.increment(); - HRegion region = getRegion(request.getRegion()); + Region region = getRegion(request.getRegion()); MutateResponse.Builder builder = MutateResponse.newBuilder(); MutationProto mutation = request.getMutation(); if (!region.getRegionInfo().isMetaTable()) { regionServer.cacheFlusher.reclaimMemStoreMemory(); } - long nonceGroup = request.hasNonceGroup() - ? request.getNonceGroup() : HConstants.NO_NONCE; + long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE; Result r = null; Boolean processed = null; MutationType type = mutation.getMutateType(); @@ -2020,11 +2114,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler, requestCount.increment(); int ttl = 0; - HRegion region = null; + Region region = null; RegionScanner scanner = null; RegionScannerHolder rsh = null; boolean moreResults = true; boolean closeScanner = false; + boolean isSmallScan = false; ScanResponse.Builder builder = ScanResponse.newBuilder(); if (request.hasCloseScanner()) { closeScanner = request.getCloseScanner(); @@ -2056,8 +2151,15 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (!isLoadingCfsOnDemandSet) { scan.setLoadColumnFamiliesOnDemand(region.isLoadingCfsOnDemandDefault()); } - scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE); - region.prepareScanner(scan); + + isSmallScan = scan.isSmall(); + if (!scan.hasFamilies()) { + // Adding all families to scanner + for (byte[] family: region.getTableDesc().getFamiliesKeys()) { + scan.addFamily(family); + } + } + if (region.getCoprocessorHost() != null) { scanner = region.getCoprocessorHost().preScannerOpen(scan); } @@ -2097,9 +2199,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // Remove lease while its being processed in server; protects against case // where processing of request takes > lease expiration time. lease = regionServer.leases.removeLease(scannerName); - List results = new ArrayList(rows); - long currentScanResultSize = 0; + List results = new ArrayList(); long totalCellSize = 0; + long currentScanResultSize = 0; boolean done = false; // Call coprocessor. Get region info from scanner. 
@@ -2109,8 +2211,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (!results.isEmpty()) { for (Result r : results) { for (Cell cell : r.rawCells()) { - currentScanResultSize += CellUtil.estimatedHeapSizeOf(cell); totalCellSize += CellUtil.estimatedSerializedSizeOf(cell); + currentScanResultSize += CellUtil.estimatedHeapSizeOfWithoutTags(cell); } } } @@ -2130,20 +2232,52 @@ public class RSRpcServices implements HBaseRPCErrorHandler, int i = 0; synchronized(scanner) { boolean stale = (region.getRegionInfo().getReplicaId() != 0); + boolean clientHandlesPartials = + request.hasClientHandlesPartials() && request.getClientHandlesPartials(); + + // On the server side we must ensure that the correct ordering of partial results is + // returned to the client to allow them to properly reconstruct the partial results. + // If the coprocessor host is adding to the result list, we cannot guarantee the + // correct ordering of partial results and so we prevent partial results from being + // formed. + boolean serverGuaranteesOrderOfPartials = currentScanResultSize == 0; + boolean allowPartialResults = + clientHandlesPartials && serverGuaranteesOrderOfPartials && !isSmallScan; + boolean moreRows = false; + + final LimitScope sizeScope = + allowPartialResults ? LimitScope.BETWEEN_CELLS : LimitScope.BETWEEN_ROWS; + + // Configure with limits for this RPC. Set keep progress true since size progress + // towards size limit should be kept between calls to nextRaw + ScannerContext.Builder contextBuilder = ScannerContext.newBuilder(true); + contextBuilder.setSizeLimit(sizeScope, maxResultSize); + contextBuilder.setBatchLimit(scanner.getBatch()); + ScannerContext scannerContext = contextBuilder.build(); + while (i < rows) { - // Stop collecting results if maxScannerResultSize is set and we have exceeded it - if ((maxScannerResultSize < Long.MAX_VALUE) && - (currentScanResultSize >= maxResultSize)) { + // Stop collecting results if we have exceeded maxResultSize + if (scannerContext.checkSizeLimit(LimitScope.BETWEEN_ROWS)) { + builder.setMoreResultsInRegion(true); break; } + + // Reset the batch progress to 0 before every call to RegionScanner#nextRaw. The + // batch limit is a limit on the number of cells per Result. Thus, if progress is + // being tracked (i.e. 
scannerContext.keepProgress() is true) then we need to + // reset the batch progress between nextRaw invocations since we don't want the + // batch progress from previous calls to affect future calls + scannerContext.setBatchProgress(0); + // Collect values to be returned here - boolean moreRows = scanner.nextRaw(values); + moreRows = scanner.nextRaw(values, scannerContext); + if (!values.isEmpty()) { for (Cell cell : values) { - currentScanResultSize += CellUtil.estimatedHeapSizeOf(cell); totalCellSize += CellUtil.estimatedSerializedSizeOf(cell); } - results.add(Result.create(values, null, stale)); + final boolean partial = scannerContext.partialResultFormed(); + results.add(Result.create(values, null, stale, partial)); i++; } if (!moreRows) { @@ -2151,9 +2285,21 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } values.clear(); } + + if (scannerContext.checkSizeLimit(LimitScope.BETWEEN_ROWS) || i >= rows || + moreRows) { + // We stopped prematurely + builder.setMoreResultsInRegion(true); + } else { + // We didn't get a single batch + builder.setMoreResultsInRegion(false); + } } - region.readRequestsCount.add(i); + region.updateReadRequestsCount(i); region.getMetrics().updateScanNext(totalCellSize); + if (regionServer.metricsRegionServer != null) { + regionServer.metricsRegionServer.updateScannerNext(totalCellSize); + } } finally { region.closeRegionOperation(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java new file mode 100644 index 00000000000..9a33b6450c4 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java @@ -0,0 +1,678 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.HDFSBlocksDistribution; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.IsolationLevel; +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.RowMutations; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.conf.ConfigurationObserver; +import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException; +import org.apache.hadoop.hbase.filter.ByteArrayComparable; +import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay; + +import com.google.protobuf.Message; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; + +/** + * Regions store data for a certain region of a table. It stores all columns + * for each row. A given table consists of one or more Regions. + * + *

      A Region is defined by its table and its key extent. + * + *

      Locking at the Region level serves only one purpose: preventing the + * region from being closed (and consequently split) while other operations + * are ongoing. Each row level operation obtains both a row lock and a region + * read lock for the duration of the operation. While a scanner is being + * constructed, getScanner holds a read lock. If the scanner is successfully + * constructed, it holds a read lock until it is closed. A close takes out a + * write lock and consequently will block for ongoing operations and will block + * new operations from starting while the close is in progress. + */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving +public interface Region extends ConfigurationObserver { + + /////////////////////////////////////////////////////////////////////////// + // Region state + + /** @return region information for this region */ + HRegionInfo getRegionInfo(); + + /** @return table descriptor for this region */ + HTableDescriptor getTableDesc(); + + /** @return true if region is available (not closed and not closing) */ + boolean isAvailable(); + + /** @return true if region is closed */ + boolean isClosed(); + + /** @return True if closing process has started */ + boolean isClosing(); + + /** @return True if region is in recovering state */ + boolean isRecovering(); + + /** @return True if region is read only */ + boolean isReadOnly(); + + /** + * Return the list of Stores managed by this region + *

      Use with caution. Exposed for use of fixup utilities. + * @return a list of the Stores managed by this region + */ + List<Store> getStores(); + + /** + * Return the Store for the given family + *

      Use with caution. Exposed for use of fixup utilities. + * @return the Store for the given family + */ + Store getStore(byte[] family); + + /** @return list of store file names for the given families */ + List getStoreFileList(byte [][] columns); + + /** + * Check the region's underlying store files, open the files that have not + * been opened yet, and remove the store file readers for store files no + * longer available. + * @throws IOException + */ + boolean refreshStoreFiles() throws IOException; + + /** @return the latest sequence number that was read from storage when this region was opened */ + long getOpenSeqNum(); + + /** @return the max sequence id of flushed data on this region */ + long getMaxFlushedSeqId(); + + /** @return the oldest sequence id found in the store for the given family */ + public long getOldestSeqIdOfStore(byte[] familyName); + + /** + * This can be used to determine the last time all files of this region were major compacted. + * @param majorCompactioOnly Only consider HFile that are the result of major compaction + * @return the timestamp of the oldest HFile for all stores of this region + */ + long getOldestHfileTs(boolean majorCompactioOnly) throws IOException; + + /** + * @return map of column family names to max sequence id that was read from storage when this + * region was opened + */ + public Map getMaxStoreSeqId(); + + /** @return true if loading column families on demand by default */ + boolean isLoadingCfsOnDemandDefault(); + + /** @return readpoint considering given IsolationLevel */ + long getReadpoint(IsolationLevel isolationLevel); + + /** + * @return The earliest time a store in the region was flushed. All + * other stores in the region would have been flushed either at, or + * after this time. + */ + long getEarliestFlushTimeForAllStores(); + + /////////////////////////////////////////////////////////////////////////// + // Metrics + + /** @return read requests count for this region */ + long getReadRequestsCount(); + + /** + * Update the read request count for this region + * @param i increment + */ + void updateReadRequestsCount(long i); + + /** @return write request count for this region */ + long getWriteRequestsCount(); + + /** + * Update the write request count for this region + * @param i increment + */ + void updateWriteRequestsCount(long i); + + /** @return memstore size for this region, in bytes */ + long getMemstoreSize(); + + /** @return the number of mutations processed bypassing the WAL */ + long getNumMutationsWithoutWAL(); + + /** @return the size of data processed bypassing the WAL, in bytes */ + long getDataInMemoryWithoutWAL(); + + /** @return the number of blocked requests */ + long getBlockedRequestsCount(); + + /** @return the number of checkAndMutate guards that passed */ + long getCheckAndMutateChecksPassed(); + + /** @return the number of failed checkAndMutate guards */ + long getCheckAndMutateChecksFailed(); + + /** @return the MetricsRegion for this region */ + MetricsRegion getMetrics(); + + /** @return the block distribution for all Stores managed by this region */ + HDFSBlocksDistribution getHDFSBlocksDistribution(); + + /////////////////////////////////////////////////////////////////////////// + // Locking + + // Region read locks + + /** + * Operation enum is used in {@link Region#startRegionOperation} to provide context for + * various checks before any region operation begins. 
+ */ + enum Operation { + ANY, GET, PUT, DELETE, SCAN, APPEND, INCREMENT, SPLIT_REGION, MERGE_REGION, BATCH_MUTATE, + REPLAY_BATCH_MUTATE, COMPACT_REGION, REPLAY_EVENT + } + + /** + * This method needs to be called before any public call that reads or + * modifies data. + * Acquires a read lock and checks if the region is closing or closed. + *

      {@link #closeRegionOperation} MUST then always be called after + * the operation has completed, whether it succeeded or failed. + * @throws IOException + */ + void startRegionOperation() throws IOException; + + /** + * This method needs to be called before any public call that reads or + * modifies data. + * Acquires a read lock and checks if the region is closing or closed. + *
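The pairing requirement spelled out in this javadoc (closeRegionOperation must always follow startRegionOperation, whether the call succeeded or failed) maps directly onto a try/finally block. A minimal sketch under that contract; the helper name is illustrative and the Region handle is assumed to come from elsewhere:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.regionserver.Region;
    import org.apache.hadoop.hbase.regionserver.Region.Operation;

    // Illustrative helper (not part of this patch).
    public final class RegionOperationSketch {
      // Reads a row while holding the region operation (read) lock for the duration of the call.
      static Result guardedGet(Region region, byte[] row) throws IOException {
        region.startRegionOperation(Operation.GET);
        try {
          return region.get(new Get(row));
        } finally {
          // Must always run, whether the get succeeded or threw.
          region.closeRegionOperation();
        }
      }
    }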

      {@link #closeRegionOperation} MUST then always be called after + * the operation has completed, whether it succeeded or failed. + * @param op The operation is about to be taken on the region + * @throws IOException + */ + void startRegionOperation(Operation op) throws IOException; + + /** + * Closes the region operation lock. + * @throws IOException + */ + void closeRegionOperation() throws IOException; + + // Row write locks + + /** + * Row lock held by a given thread. + * One thread may acquire multiple locks on the same row simultaneously. + * The locks must be released by calling release() from the same thread. + */ + public interface RowLock { + /** + * Release the given lock. If there are no remaining locks held by the current thread + * then unlock the row and allow other threads to acquire the lock. + * @throws IllegalArgumentException if called by a different thread than the lock owning + * thread + */ + void release(); + } + + /** + * Tries to acquire a lock on the given row. + * @param waitForLock if true, will block until the lock is available. + * Otherwise, just tries to obtain the lock and returns + * false if unavailable. + * @return the row lock if acquired, + * null if waitForLock was false and the lock was not acquired + * @throws IOException if waitForLock was true and the lock could not be acquired after waiting + */ + RowLock getRowLock(byte[] row, boolean waitForLock) throws IOException; + + /** + * If the given list of row locks is not null, releases all locks. + */ + void releaseRowLocks(List rowLocks); + + /////////////////////////////////////////////////////////////////////////// + // Region operations + + /** + * Perform one or more append operations on a row. + * @param append + * @param nonceGroup + * @param nonce + * @return result of the operation + * @throws IOException + */ + Result append(Append append, long nonceGroup, long nonce) throws IOException; + + /** + * Perform a batch of mutations. + *
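The batch entry point described here carries explicit nonce arguments; as the doBatchOp change earlier in this patch shows, callers without a nonce pass HConstants.NO_NONCE. A minimal sketch (helper name invented for illustration), passing only Put and Delete mutations as the javadoc goes on to require:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.regionserver.OperationStatus;
    import org.apache.hadoop.hbase.regionserver.Region;

    // Illustrative helper (not part of this patch).
    public final class BatchMutateSketch {
      // Applies one Put and one Delete in a single batch, without nonces.
      static OperationStatus[] putAndDelete(Region region, Put put, Delete delete)
          throws IOException {
        Mutation[] batch = new Mutation[] { put, delete };
        return region.batchMutate(batch, HConstants.NO_NONCE, HConstants.NO_NONCE);
      }
    }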

      + * Note this supports only Put and Delete mutations and will ignore other types passed. + * @param mutations the list of mutations + * @param nonceGroup + * @param nonce + * @return an array of OperationStatus which internally contains the + * OperationStatusCode and the exceptionMessage if any. + * @throws IOException + */ + OperationStatus[] batchMutate(Mutation[] mutations, long nonceGroup, long nonce) + throws IOException; + + /** + * Replay a batch of mutations. + * @param mutations mutations to replay. + * @param replaySeqId + * @return an array of OperationStatus which internally contains the + * OperationStatusCode and the exceptionMessage if any. + * @throws IOException + */ + OperationStatus[] batchReplay(MutationReplay[] mutations, long replaySeqId) throws IOException; + + /** + * Atomically checks if a row/family/qualifier value matches the expected val + * If it does, it performs the row mutations. If the passed value is null, t + * is for the lack of column (ie: non-existence) + * @param row to check + * @param family column family to check + * @param qualifier column qualifier to check + * @param compareOp the comparison operator + * @param comparator + * @param mutation + * @param writeToWAL + * @return true if mutation was applied, false otherwise + * @throws IOException + */ + boolean checkAndMutate(byte [] row, byte [] family, byte [] qualifier, CompareOp compareOp, + ByteArrayComparable comparator, Mutation mutation, boolean writeToWAL) throws IOException; + + /** + * Atomically checks if a row/family/qualifier value matches the expected val + * If it does, it performs the row mutations. If the passed value is null, t + * is for the lack of column (ie: non-existence) + * @param row to check + * @param family column family to check + * @param qualifier column qualifier to check + * @param compareOp the comparison operator + * @param comparator + * @param mutations + * @param writeToWAL + * @return true if mutation was applied, false otherwise + * @throws IOException + */ + boolean checkAndRowMutate(byte [] row, byte [] family, byte [] qualifier, CompareOp compareOp, + ByteArrayComparable comparator, RowMutations mutations, boolean writeToWAL) + throws IOException; + + /** + * Deletes the specified cells/row. + * @param delete + * @throws IOException + */ + void delete(Delete delete) throws IOException; + + /** + * Do a get based on the get parameter. + * @param get query parameters + * @return result of the operation + */ + Result get(Get get) throws IOException; + + /** + * Do a get based on the get parameter. + * @param get query parameters + * @param withCoprocessor invoke coprocessor or not. We don't want to + * always invoke cp. + * @return list of cells resulting from the operation + */ + List get(Get get, boolean withCoprocessor) throws IOException; + + /** + * Return all the data for the row that matches row exactly, + * or the one that immediately preceeds it, at or immediately before + * ts. + * @param row + * @param family + * @return result of the operation + * @throws IOException + */ + Result getClosestRowBefore(byte[] row, byte[] family) throws IOException; + + /** + * Return an iterator that scans over the HRegion, returning the indicated + * columns and rows specified by the {@link Scan}. + *
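To make the checkAndMutate signature described above concrete, the following sketch conditionally applies a Put when a column currently holds an expected value; names and values are illustrative, and the WAL write is left enabled:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.filter.BinaryComparator;
    import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
    import org.apache.hadoop.hbase.regionserver.Region;

    // Illustrative helper (not part of this patch).
    public final class CheckAndMutateSketch {
      // Applies the Put only if row/cf:qual currently equals expectedValue.
      static boolean putIfEquals(Region region, byte[] row, byte[] cf, byte[] qual,
          byte[] expectedValue, byte[] newValue) throws IOException {
        Put put = new Put(row);
        put.addColumn(cf, qual, newValue);
        return region.checkAndMutate(row, cf, qual, CompareOp.EQUAL,
            new BinaryComparator(expectedValue), put, true /* writeToWAL */);
      }
    }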

      + * This Iterator must be closed by the caller. + * + * @param scan configured {@link Scan} + * @return RegionScanner + * @throws IOException read exceptions + */ + RegionScanner getScanner(Scan scan) throws IOException; + + /** + * Perform one or more increment operations on a row. + * @param increment + * @param nonceGroup + * @param nonce + * @return result of the operation + * @throws IOException + */ + Result increment(Increment increment, long nonceGroup, long nonce) throws IOException; + + /** + * Performs multiple mutations atomically on a single row. Currently + * {@link Put} and {@link Delete} are supported. + * + * @param mutations object that specifies the set of mutations to perform atomically + * @throws IOException + */ + void mutateRow(RowMutations mutations) throws IOException; + + /** + * Perform atomic mutations within the region. + * + * @param mutations The list of mutations to perform. + * mutations can contain operations for multiple rows. + * Caller has to ensure that all rows are contained in this region. + * @param rowsToLock Rows to lock + * @param nonceGroup Optional nonce group of the operation (client Id) + * @param nonce Optional nonce of the operation (unique random id to ensure "more idempotence") + * If multiple rows are locked care should be taken that + * rowsToLock is sorted in order to avoid deadlocks. + * @throws IOException + */ + void mutateRowsWithLocks(Collection mutations, Collection rowsToLock, + long nonceGroup, long nonce) throws IOException; + + /** + * Performs atomic multiple reads and writes on a given row. + * + * @param processor The object defines the reads and writes to a row. + */ + void processRowsWithLocks(RowProcessor processor) throws IOException; + + /** + * Performs atomic multiple reads and writes on a given row. + * + * @param processor The object defines the reads and writes to a row. + * @param nonceGroup Optional nonce group of the operation (client Id) + * @param nonce Optional nonce of the operation (unique random id to ensure "more idempotence") + */ + void processRowsWithLocks(RowProcessor processor, long nonceGroup, long nonce) + throws IOException; + + /** + * Performs atomic multiple reads and writes on a given row. + * + * @param processor The object defines the reads and writes to a row. + * @param timeout The timeout of the processor.process() execution + * Use a negative number to switch off the time bound + * @param nonceGroup Optional nonce group of the operation (client Id) + * @param nonce Optional nonce of the operation (unique random id to ensure "more idempotence") + */ + void processRowsWithLocks(RowProcessor processor, long timeout, long nonceGroup, long nonce) + throws IOException; + + /** + * Puts some data in the table. 
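Because the scanner returned by getScanner must be closed by the caller (as noted above), a try/finally is the natural shape. A minimal row-counting sketch over a single family; the helper name is illustrative:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.regionserver.Region;
    import org.apache.hadoop.hbase.regionserver.RegionScanner;

    // Illustrative helper (not part of this patch).
    public final class RegionScannerSketch {
      // Counts rows in one column family; the scanner is always closed.
      static long countRows(Region region, byte[] family) throws IOException {
        Scan scan = new Scan();
        scan.addFamily(family);
        RegionScanner scanner = region.getScanner(scan);
        try {
          List<Cell> cells = new ArrayList<Cell>();
          long rows = 0;
          boolean moreRows;
          do {
            cells.clear();
            moreRows = scanner.next(cells); // one row per call with default batching
            if (!cells.isEmpty()) {
              rows++;
            }
          } while (moreRows);
          return rows;
        } finally {
          scanner.close();
        }
      }
    }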
+ * @param put + * @throws IOException + */ + void put(Put put) throws IOException; + + /** + * Listener class to enable callers of + * bulkLoadHFile() to perform any necessary + * pre/post processing of a given bulkload call + */ + interface BulkLoadListener { + + /** + * Called before an HFile is actually loaded + * @param family family being loaded to + * @param srcPath path of HFile + * @return final path to be used for actual loading + * @throws IOException + */ + String prepareBulkLoad(byte[] family, String srcPath) throws IOException; + + /** + * Called after a successful HFile load + * @param family family being loaded to + * @param srcPath path of HFile + * @throws IOException + */ + void doneBulkLoad(byte[] family, String srcPath) throws IOException; + + /** + * Called after a failed HFile load + * @param family family being loaded to + * @param srcPath path of HFile + * @throws IOException + */ + void failedBulkLoad(byte[] family, String srcPath) throws IOException; + } + + /** + * Attempts to atomically load a group of hfiles. This is critical for loading + * rows with multiple column families atomically. + * + * @param familyPaths List of Pair + * @param bulkLoadListener Internal hooks enabling massaging/preparation of a + * file about to be bulk loaded + * @param assignSeqId + * @return true if successful, false if failed recoverably + * @throws IOException if failed unrecoverably. + */ + boolean bulkLoadHFiles(Collection> familyPaths, boolean assignSeqId, + BulkLoadListener bulkLoadListener) throws IOException; + + /////////////////////////////////////////////////////////////////////////// + // Coprocessors + + /** @return the coprocessor host */ + RegionCoprocessorHost getCoprocessorHost(); + + /** + * Executes a single protocol buffer coprocessor endpoint {@link Service} method using + * the registered protocol handlers. {@link Service} implementations must be registered via the + * {@link Region#registerService(com.google.protobuf.Service)} + * method before they are available. + * + * @param controller an {@code RpcContoller} implementation to pass to the invoked service + * @param call a {@code CoprocessorServiceCall} instance identifying the service, method, + * and parameters for the method invocation + * @return a protocol buffer {@code Message} instance containing the method's result + * @throws IOException if no registered service handler is found or an error + * occurs during the invocation + * @see org.apache.hadoop.hbase.regionserver.Region#registerService(com.google.protobuf.Service) + */ + Message execService(RpcController controller, CoprocessorServiceCall call) throws IOException; + + /** + * Registers a new protocol buffer {@link Service} subclass as a coprocessor endpoint to + * be available for handling + * {@link Region#execService(com.google.protobuf.RpcController, + * org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall)}} calls. + * + *

      + * Only a single instance may be registered per region for a given {@link Service} subclass (the + * instances are keyed on {@link com.google.protobuf.Descriptors.ServiceDescriptor#getFullName()}). + * After the first registration, subsequent calls with the same service name will fail with + * a return value of {@code false}. + *
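A brief sketch of the registration rule just described: only the first registerService call for a given Service subclass succeeds, and later attempts return false. The logging helper is invented for illustration; the Service instance is assumed to be a generated coprocessor endpoint:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hbase.regionserver.Region;

    import com.google.protobuf.Service;

    // Illustrative helper (not part of this patch).
    public final class RegisterServiceSketch {
      private static final Log LOG = LogFactory.getLog(RegisterServiceSketch.class);

      // Registers an endpoint; duplicate registrations of the same Service subclass are rejected.
      static void registerEndpoint(Region region, Service endpoint) {
        if (!region.registerService(endpoint)) {
          LOG.warn("Endpoint " + endpoint.getDescriptorForType().getFullName()
              + " already registered on " + region.getRegionInfo().getRegionNameAsString());
        }
      }
    }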

      + * @param instance the {@code Service} subclass instance to expose as a coprocessor endpoint + * @return {@code true} if the registration was successful, {@code false} + * otherwise + */ + boolean registerService(Service instance); + + /////////////////////////////////////////////////////////////////////////// + // RowMutation processor support + + /** + * Check the collection of families for validity. + * @param families + * @throws NoSuchColumnFamilyException + */ + void checkFamilies(Collection families) throws NoSuchColumnFamilyException; + + /** + * Check the collection of families for valid timestamps + * @param familyMap + * @param now current timestamp + * @throws FailedSanityCheckException + */ + void checkTimestamps(Map> familyMap, long now) + throws FailedSanityCheckException; + + /** + * Prepare a delete for a row mutation processor + * @param delete The passed delete is modified by this method. WARNING! + * @throws IOException + */ + void prepareDelete(Delete delete) throws IOException; + + /** + * Set up correct timestamps in the KVs in Delete object. + *

      Caller should have the row and region locks. + * @param mutation + * @param familyCellMap + * @param now + * @throws IOException + */ + void prepareDeleteTimestamps(Mutation mutation, Map> familyCellMap, + byte[] now) throws IOException; + + /** + * Replace any cell timestamps set to {@link org.apache.hadoop.hbase.HConstants#LATEST_TIMESTAMP} + * provided current timestamp. + * @param values + * @param now + */ + void updateCellTimestamps(final Iterable> values, final byte[] now) + throws IOException; + + /////////////////////////////////////////////////////////////////////////// + // Flushes, compactions, splits, etc. + // Wizards only, please + + interface FlushResult { + enum Result { + FLUSHED_NO_COMPACTION_NEEDED, + FLUSHED_COMPACTION_NEEDED, + // Special case where a flush didn't run because there's nothing in the memstores. Used when + // bulk loading to know when we can still load even if a flush didn't happen. + CANNOT_FLUSH_MEMSTORE_EMPTY, + CANNOT_FLUSH + } + + /** @return the detailed result code */ + Result getResult(); + + /** @return true if the memstores were flushed, else false */ + boolean isFlushSucceeded(); + + /** @return True if the flush requested a compaction, else false */ + boolean isCompactionNeeded(); + } + + /** + * Flush the cache. + * + *

      When this method is called the cache will be flushed unless:
      + *   1. the cache is empty
      + *   2. the region is closed.
      + *   3. a flush is already in progress
      + *   4. writes are disabled
      + *
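To make the flush semantics above concrete, here is a minimal sketch that forces a flush through the interface and inspects the FlushResult; the helper name is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.regionserver.Region;
    import org.apache.hadoop.hbase.regionserver.Region.FlushResult;

    // Illustrative helper (not part of this patch).
    public final class FlushSketch {
      // Forces a flush of all stores and reports whether a follow-up compaction was requested.
      static boolean flushAndCheckCompaction(Region region) throws IOException {
        FlushResult result = region.flush(true); // force = true: flush all stores
        if (!result.isFlushSucceeded()) {
          // e.g. CANNOT_FLUSH_MEMSTORE_EMPTY or CANNOT_FLUSH
          return false;
        }
        return result.isCompactionNeeded();
      }
    }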

      This method may block for some time, so it should not be called from a + * time-sensitive thread. + * @param force whether we want to force a flush of all stores + * @return FlushResult indicating whether the flush was successful or not and if + * the region needs compacting + * + * @throws IOException general io exceptions + * @throws DroppedSnapshotException Thrown when abort is required + * because a snapshot was not properly persisted. + */ + FlushResult flush(boolean force) throws IOException; + + /** + * Synchronously compact all stores in the region. + *

      This operation could block for a long time, so don't call it from a + * time-sensitive thread. + *

      Note that no locks are taken to prevent possible conflicts between + * compaction and splitting activities. The regionserver does not normally compact + * and split in parallel. However by calling this method you may introduce + * unexpected and unhandled concurrency. Don't do this unless you know what + * you are doing. + * + * @param majorCompaction True to force a major compaction regardless of thresholds + * @throws IOException + */ + void compact(final boolean majorCompaction) throws IOException; + + /** + * Trigger major compaction on all stores in the region. + *
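The two compaction entry points differ mainly in blocking behaviour: compact(boolean) runs synchronously in the calling thread, while triggerMajorCompaction() only requests work that the RegionServer performs later. A small sketch with illustrative helper names; the caveats above about concurrent splits still apply:

    import java.io.IOException;
    import org.apache.hadoop.hbase.regionserver.Region;

    // Illustrative helper (not part of this patch).
    public final class CompactionSketch {
      // Blocks until compaction of all stores in the region completes.
      static void compactNow(Region region, boolean major) throws IOException {
        region.compact(major);
      }

      // Returns immediately; the compaction is scheduled and run asynchronously.
      static void requestMajorCompaction(Region region) throws IOException {
        region.triggerMajorCompaction();
      }
    }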

      + * Compaction will be performed asynchronously to this call by the RegionServer's + * CompactSplitThread. See also {@link Store#triggerMajorCompaction()} + * @throws IOException + */ + void triggerMajorCompaction() throws IOException; + + /** + * @return if a given region is in compaction now. + */ + CompactionState getCompactionState(); + + /** Wait for all current flushes and compactions of the region to complete */ + void waitForFlushesAndCompactions(); + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java index a32a478edb5..6e239520dc6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java @@ -77,7 +77,7 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.regionserver.HRegion.Operation; +import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.wal.WALKey; @@ -88,7 +88,7 @@ import org.apache.hadoop.hbase.util.Pair; /** * Implements the coprocessor environment and runtime support for coprocessors - * loaded within a {@link HRegion}. + * loaded within a {@link Region}. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving @@ -106,7 +106,7 @@ public class RegionCoprocessorHost static class RegionEnvironment extends CoprocessorHost.Environment implements RegionCoprocessorEnvironment { - private HRegion region; + private Region region; private RegionServerServices rsServices; ConcurrentMap sharedData; private static final int LATENCY_BUFFER_SIZE = 100; @@ -121,7 +121,7 @@ public class RegionCoprocessorHost * @param priority chaining priority */ public RegionEnvironment(final Coprocessor impl, final int priority, - final int seq, final Configuration conf, final HRegion region, + final int seq, final Configuration conf, final Region region, final RegionServerServices services, final ConcurrentMap sharedData) { super(impl, priority, seq, conf); this.region = region; @@ -139,7 +139,7 @@ public class RegionCoprocessorHost /** @return the region */ @Override - public HRegion getRegion() { + public Region getRegion() { return region; } @@ -209,7 +209,7 @@ public class RegionCoprocessorHost /** The region server services */ RegionServerServices rsServices; /** The region */ - HRegion region; + Region region; /** * Constructor @@ -217,7 +217,7 @@ public class RegionCoprocessorHost * @param rsServices interface to available region server functionality * @param conf the configuration */ - public RegionCoprocessorHost(final HRegion region, + public RegionCoprocessorHost(final Region region, final RegionServerServices rsServices, final Configuration conf) { super(rsServices); this.conf = conf; @@ -258,8 +258,9 @@ public class RegionCoprocessorHost key + ", spec: " + spec); continue; } - int priority = matcher.group(3).trim().isEmpty() ? - Coprocessor.PRIORITY_USER : Integer.valueOf(matcher.group(3)); + String priorityStr = matcher.group(3).trim(); + int priority = priorityStr.isEmpty() ? 
+ Coprocessor.PRIORITY_USER : Integer.parseInt(priorityStr); String cfgSpec = null; try { cfgSpec = matcher.group(4); @@ -328,6 +329,14 @@ public class RegionCoprocessorHost } void loadTableCoprocessors(final Configuration conf) { + boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, + DEFAULT_COPROCESSORS_ENABLED); + boolean tableCoprocessorsEnabled = conf.getBoolean(USER_COPROCESSORS_ENABLED_CONF_KEY, + DEFAULT_USER_COPROCESSORS_ENABLED); + if (!(coprocessorsEnabled && tableCoprocessorsEnabled)) { + return; + } + // scan the table attributes for coprocessor load specifications // initialize the coprocessors List configured = new ArrayList(); @@ -698,7 +707,7 @@ public class RegionCoprocessorHost * @param r the new right-hand daughter region * @throws IOException */ - public void postSplit(final HRegion l, final HRegion r) throws IOException { + public void postSplit(final Region l, final Region r) throws IOException { execOperation(coprocessors.isEmpty() ? null : new RegionOperation() { @Override public void call(RegionObserver oserver, ObserverContext ctx) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java index 5226a984a39..534d01df36f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java @@ -42,10 +42,10 @@ class RegionMergeRequest implements Runnable { private final boolean forcible; private TableLock tableLock; - RegionMergeRequest(HRegion a, HRegion b, HRegionServer hrs, boolean forcible) { + RegionMergeRequest(Region a, Region b, HRegionServer hrs, boolean forcible) { Preconditions.checkNotNull(hrs); - this.region_a = a; - this.region_b = b; + this.region_a = (HRegion)a; + this.region_b = (HRegion)b; this.server = hrs; this.forcible = forcible; } @@ -65,13 +65,14 @@ class RegionMergeRequest implements Runnable { } try { final long startTime = EnvironmentEdgeManager.currentTime(); - RegionMergeTransaction mt = new RegionMergeTransaction(region_a, + RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(region_a, region_b, forcible); //acquire a shared read lock on the table, so that table schema modifications //do not happen concurrently tableLock = server.getTableLockManager().readLock(region_a.getTableDesc().getTableName() - , "MERGE_REGIONS:" + region_a.getRegionNameAsString() + ", " + region_b.getRegionNameAsString()); + , "MERGE_REGIONS:" + region_a.getRegionInfo().getRegionNameAsString() + ", " + + region_b.getRegionInfo().getRegionNameAsString()); try { tableLock.acquire(); } catch (IOException ex) { @@ -134,7 +135,7 @@ class RegionMergeRequest implements Runnable { LOG.error("Could not release the table lock (something is really wrong). 
" + "Aborting this server to avoid holding the lock forever."); this.server.abort("Abort; we got an error when releasing the table lock " - + "on " + region_a.getRegionNameAsString()); + + "on " + region_a.getRegionInfo().getRegionNameAsString()); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java index d478bfe518b..72f0e892fe2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java @@ -13,35 +13,20 @@ * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitationsME + * License for the specific language governing permissions and limitations * under the License. */ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; -import java.util.ArrayList; import java.util.List; -import java.util.ListIterator; -import java.util.Map; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.regionserver.SplitTransaction.TransactionListener; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.MetaMutationAnnotation; -import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; -import org.apache.hadoop.hbase.regionserver.SplitTransaction.LoggingProgressable; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.Pair; /** * Executes region merge as a "transaction". It is similar with @@ -50,12 +35,21 @@ import org.apache.hadoop.hbase.util.Pair; * transaction and {@link #rollback(Server, RegionServerServices)} to cleanup if * execute fails. * - *

      - * Here is an example of how you would use this class: - * + *

      Here is an example of how you would use this interface: *

      - *  RegionMergeTransaction mt = new RegionMergeTransaction(this.conf, parent, midKey)
      - *  if (!mt.prepare(services)) return;
      + *  RegionMergeTransactionFactory factory = new RegionMergeTransactionFactory(conf);
      + *  RegionMergeTransaction mt = factory.create(regionA, regionB, forcible)
      + *    .registerTransactionListener(new TransactionListener() {
      + *       public void transition(RegionMergeTransaction transaction,
      + *         RegionMergeTransactionPhase from, RegionMergeTransactionPhase to) throws IOException {
      + *         // ...
      + *       }
      + *       public void rollback(RegionMergeTransaction transaction,
      + *         RegionMergeTransactionPhase from, RegionMergeTransactionPhase to) {
      + *         // ...
      + *       }
      + *    });
      + *  if (!mt.prepare(services)) return;
        *  try {
        *    mt.execute(server, services);
        *  } catch (IOException ioe) {
      @@ -63,33 +57,25 @@ import org.apache.hadoop.hbase.util.Pair;
        *      mt.rollback(server, services);
        *      return;
        *    } catch (RuntimeException e) {
      - *      myAbortable.abort("Failed merge, abort");
      + *      // abort the server
        *    }
        *  }
        * 
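Editorial note on the example above: the javadoc leaves the listener bodies as "// ...". A minimal standalone sketch of a listener that merely logs phase changes, written against the TransactionListener and RegionMergeTransactionPhase types introduced by this patch, might look as follows (the class name LoggingMergeListener is illustrative only and not part of the patch):

    import java.io.IOException;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction;
    import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction.RegionMergeTransactionPhase;
    import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction.TransactionListener;

    /** Illustrative listener that only logs merge transaction phase changes. */
    public class LoggingMergeListener implements TransactionListener {
      private static final Log LOG = LogFactory.getLog(LoggingMergeListener.class);

      @Override
      public void transition(RegionMergeTransaction transaction,
          RegionMergeTransactionPhase from, RegionMergeTransactionPhase to) throws IOException {
        // Per the interface contract, throwing an IOException here aborts the transaction.
        LOG.info("Merge transaction transitioning from " + from + " to " + to);
      }

      @Override
      public void rollback(RegionMergeTransaction transaction,
          RegionMergeTransactionPhase from, RegionMergeTransactionPhase to) {
        LOG.info("Merge transaction rolling back from " + from + " to " + to);
      }
    }

Such a listener would be attached via factory.create(regionA, regionB, forcible).registerTransactionListener(new LoggingMergeListener()), as in the example above.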
      - *

      - * This class is not thread safe. Caller needs ensure merge is run by one thread - * only. + *

      A merge transaction is not thread safe. Callers must ensure a split is run by + * one thread only. */ -@InterfaceAudience.Private -public class RegionMergeTransaction { - private static final Log LOG = LogFactory.getLog(RegionMergeTransaction.class); - - // Merged region info - private HRegionInfo mergedRegionInfo; - // region_a sorts before region_b - private final HRegion region_a; - private final HRegion region_b; - // merges dir is under region_a - private final Path mergesdir; - // We only merge adjacent regions if forcible is false - private final boolean forcible; - +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving +public interface RegionMergeTransaction { /** - * Types to add to the transaction journal. Each enum is a step in the merge - * transaction. Used to figure how much we need to rollback. + * Each enum is a step in the merge transaction. */ - enum JournalEntry { + enum RegionMergeTransactionPhase { + STARTED, + /** + * Prepared + */ + PREPARED, /** * Set region as in transition, set it into MERGING state. */ @@ -122,94 +108,59 @@ public class RegionMergeTransaction { * Point of no return. If we got here, then transaction is not recoverable * other than by crashing out the regionserver. */ - PONR - } - - /* - * Journal of how far the merge transaction has progressed. - */ - private final List journal = new ArrayList(); - - private static IOException closedByOtherException = new IOException( - "Failed to close region: already closed by another thread"); - - private RegionServerCoprocessorHost rsCoprocessorHost = null; - - /** - * Constructor - * @param a region a to merge - * @param b region b to merge - * @param forcible if false, we will only merge adjacent regions - */ - public RegionMergeTransaction(final HRegion a, final HRegion b, - final boolean forcible) { - if (a.getRegionInfo().compareTo(b.getRegionInfo()) <= 0) { - this.region_a = a; - this.region_b = b; - } else { - this.region_a = b; - this.region_b = a; - } - this.forcible = forcible; - this.mergesdir = region_a.getRegionFileSystem().getMergesDir(); + PONR, + /** + * Completed + */ + COMPLETED } /** - * Does checks on merge inputs. + * Split transaction journal entry + */ + public interface JournalEntry { + + /** @return the completed phase marked by this journal entry */ + RegionMergeTransactionPhase getPhase(); + + /** @return the time of phase completion */ + long getTimeStamp(); + } + + /** + * Split transaction listener + */ + public interface TransactionListener { + + /** + * Invoked when transitioning forward from one transaction phase to another + * @param transaction the transaction + * @param from the current phase + * @param to the next phase + * @throws IOException listener can throw this to abort + */ + void transition(RegionMergeTransaction transaction, RegionMergeTransactionPhase from, + RegionMergeTransactionPhase to) throws IOException; + + /** + * Invoked when rolling back a transaction from one transaction phase to the + * previous + * @param transaction the transaction + * @param from the current phase + * @param to the previous phase + */ + void rollback(RegionMergeTransaction transaction, RegionMergeTransactionPhase from, + RegionMergeTransactionPhase to); + } + + /** + * Check merge inputs and prepare the transaction. * @param services * @return true if the regions are mergeable else * false if they are not (e.g. its already closed, etc.). 
+ * @throws IOException */ - public boolean prepare(final RegionServerServices services) { - if (!region_a.getTableDesc().getTableName() - .equals(region_b.getTableDesc().getTableName())) { - LOG.info("Can't merge regions " + region_a + "," + region_b - + " because they do not belong to the same table"); - return false; - } - if (region_a.getRegionInfo().equals(region_b.getRegionInfo())) { - LOG.info("Can't merge the same region " + region_a); - return false; - } - if (!forcible && !HRegionInfo.areAdjacent(region_a.getRegionInfo(), - region_b.getRegionInfo())) { - String msg = "Skip merging " + this.region_a.getRegionNameAsString() - + " and " + this.region_b.getRegionNameAsString() - + ", because they are not adjacent."; - LOG.info(msg); - return false; - } - if (!this.region_a.isMergeable() || !this.region_b.isMergeable()) { - return false; - } - try { - boolean regionAHasMergeQualifier = hasMergeQualifierInMeta(services, - region_a.getRegionName()); - if (regionAHasMergeQualifier || - hasMergeQualifierInMeta(services, region_b.getRegionName())) { - LOG.debug("Region " + (regionAHasMergeQualifier ? region_a.getRegionNameAsString() - : region_b.getRegionNameAsString()) - + " is not mergeable because it has merge qualifier in META"); - return false; - } - } catch (IOException e) { - LOG.warn("Failed judging whether merge transaction is available for " - + region_a.getRegionNameAsString() + " and " - + region_b.getRegionNameAsString(), e); - return false; - } - - // WARN: make sure there is no parent region of the two merging regions in - // hbase:meta If exists, fixing up daughters would cause daughter regions(we - // have merged one) online again when we restart master, so we should clear - // the parent region to prevent the above case - // Since HBASE-7721, we don't need fix up daughters any more. so here do - // nothing - - this.mergedRegionInfo = getMergedRegionInfo(region_a.getRegionInfo(), - region_b.getRegionInfo()); - return true; - } + boolean prepare(RegionServerServices services) throws IOException; /** * Run the transaction. @@ -221,325 +172,10 @@ public class RegionMergeTransaction { * @throws IOException * @see #rollback(Server, RegionServerServices) */ - public HRegion execute(final Server server, - final RegionServerServices services) throws IOException { - if (rsCoprocessorHost == null) { - rsCoprocessorHost = server != null ? - ((HRegionServer) server).getRegionServerCoprocessorHost() : null; - } - HRegion mergedRegion = createMergedRegion(server, services); - if (rsCoprocessorHost != null) { - rsCoprocessorHost.postMergeCommit(this.region_a, this.region_b, mergedRegion); - } - return stepsAfterPONR(server, services, mergedRegion); - } - - public HRegion stepsAfterPONR(final Server server, final RegionServerServices services, - HRegion mergedRegion) throws IOException { - openMergedRegion(server, services, mergedRegion); - if (rsCoprocessorHost != null) { - rsCoprocessorHost.postMerge(this.region_a, this.region_b, mergedRegion); - } - return mergedRegion; - } - - /** - * Prepare the merged region and region files. - * @param server Hosting server instance. Can be null when testing - * @param services Used to online/offline regions. - * @return merged region - * @throws IOException If thrown, transaction failed. 
Call - * {@link #rollback(Server, RegionServerServices)} - */ - HRegion createMergedRegion(final Server server, - final RegionServerServices services) throws IOException { - LOG.info("Starting merge of " + region_a + " and " - + region_b.getRegionNameAsString() + ", forcible=" + forcible); - if ((server != null && server.isStopped()) - || (services != null && services.isStopping())) { - throw new IOException("Server is stopped or stopping"); - } - - if (rsCoprocessorHost != null) { - if (rsCoprocessorHost.preMerge(this.region_a, this.region_b)) { - throw new IOException("Coprocessor bypassing regions " + this.region_a + " " - + this.region_b + " merge."); - } - } - - // If true, no cluster to write meta edits to or to use coordination. - boolean testing = server == null ? true : server.getConfiguration() - .getBoolean("hbase.testing.nocluster", false); - - HRegion mergedRegion = stepsBeforePONR(server, services, testing); - - @MetaMutationAnnotation - List metaEntries = new ArrayList(); - if (rsCoprocessorHost != null) { - if (rsCoprocessorHost.preMergeCommit(this.region_a, this.region_b, metaEntries)) { - throw new IOException("Coprocessor bypassing regions " + this.region_a + " " - + this.region_b + " merge."); - } - try { - for (Mutation p : metaEntries) { - HRegionInfo.parseRegionName(p.getRow()); - } - } catch (IOException e) { - LOG.error("Row key of mutation from coprocessor is not parsable as region name." - + "Mutations from coprocessor should only be for hbase:meta table.", e); - throw e; - } - } - - // This is the point of no return. Similar with SplitTransaction. - // IF we reach the PONR then subsequent failures need to crash out this - // regionserver - this.journal.add(JournalEntry.PONR); - - // Add merged region and delete region_a and region_b - // as an atomic update. See HBASE-7721. This update to hbase:meta makes the region - // will determine whether the region is merged or not in case of failures. - // If it is successful, master will roll-forward, if not, master will - // rollback - if (services != null && !services.reportRegionStateTransition(TransitionCode.MERGE_PONR, - mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) { - // Passed PONR, let SSH clean it up - throw new IOException("Failed to notify master that merge passed PONR: " - + region_a.getRegionInfo().getRegionNameAsString() + " and " - + region_b.getRegionInfo().getRegionNameAsString()); - } - return mergedRegion; - } - - public void prepareMutationsForMerge(HRegionInfo mergedRegion, HRegionInfo regionA, - HRegionInfo regionB, ServerName serverName, List mutations) throws IOException { - HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion); - - // Put for parent - Put putOfMerged = MetaTableAccessor.makePutFromRegionInfo(copyOfMerged); - putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER, regionA.toByteArray()); - putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER, regionB.toByteArray()); - mutations.add(putOfMerged); - // Deletes for merging regions - Delete deleteA = MetaTableAccessor.makeDeleteFromRegionInfo(regionA); - Delete deleteB = MetaTableAccessor.makeDeleteFromRegionInfo(regionB); - mutations.add(deleteA); - mutations.add(deleteB); - // The merged is a new region, openSeqNum = 1 is fine. 
- addLocation(putOfMerged, serverName, 1); - } - - public Put addLocation(final Put p, final ServerName sn, long openSeqNum) { - p.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes - .toBytes(sn.getHostAndPort())); - p.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn - .getStartcode())); - p.add(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER, Bytes.toBytes(openSeqNum)); - return p; - } - - public HRegion stepsBeforePONR(final Server server, final RegionServerServices services, - boolean testing) throws IOException { - if (services != null && !services.reportRegionStateTransition(TransitionCode.READY_TO_MERGE, - mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) { - throw new IOException("Failed to get ok from master to merge " - + region_a.getRegionInfo().getRegionNameAsString() + " and " - + region_b.getRegionInfo().getRegionNameAsString()); - } - this.journal.add(JournalEntry.SET_MERGING); - - this.region_a.getRegionFileSystem().createMergesDir(); - this.journal.add(JournalEntry.CREATED_MERGE_DIR); - - Map> hstoreFilesOfRegionA = closeAndOfflineRegion( - services, this.region_a, true, testing); - Map> hstoreFilesOfRegionB = closeAndOfflineRegion( - services, this.region_b, false, testing); - - assert hstoreFilesOfRegionA != null && hstoreFilesOfRegionB != null; - - - // - // mergeStoreFiles creates merged region dirs under the region_a merges dir - // Nothing to unroll here if failure -- clean up of CREATE_MERGE_DIR will - // clean this up. - mergeStoreFiles(hstoreFilesOfRegionA, hstoreFilesOfRegionB); - - // Log to the journal that we are creating merged region. We could fail - // halfway through. If we do, we could have left - // stuff in fs that needs cleanup -- a storefile or two. Thats why we - // add entry to journal BEFORE rather than AFTER the change. - this.journal.add(JournalEntry.STARTED_MERGED_REGION_CREATION); - HRegion mergedRegion = createMergedRegionFromMerges(this.region_a, - this.region_b, this.mergedRegionInfo); - return mergedRegion; - } - - /** - * Create a merged region from the merges directory under region a. In order - * to mock it for tests, place it with a new method. - * @param a hri of region a - * @param b hri of region b - * @param mergedRegion hri of merged region - * @return merged HRegion. - * @throws IOException - */ - HRegion createMergedRegionFromMerges(final HRegion a, final HRegion b, - final HRegionInfo mergedRegion) throws IOException { - return a.createMergedRegionFromMerges(mergedRegion, b); - } - - /** - * Close the merging region and offline it in regionserver - * @param services - * @param region - * @param isRegionA true if it is merging region a, false if it is region b - * @param testing true if it is testing - * @return a map of family name to list of store files - * @throws IOException - */ - private Map> closeAndOfflineRegion( - final RegionServerServices services, final HRegion region, - final boolean isRegionA, final boolean testing) throws IOException { - Map> hstoreFilesToMerge = null; - Exception exceptionToThrow = null; - try { - hstoreFilesToMerge = region.close(false); - } catch (Exception e) { - exceptionToThrow = e; - } - if (exceptionToThrow == null && hstoreFilesToMerge == null) { - // The region was closed by a concurrent thread. We can't continue - // with the merge, instead we must just abandon the merge. 
If we - // reopen or merge this could cause problems because the region has - // probably already been moved to a different server, or is in the - // process of moving to a different server. - exceptionToThrow = closedByOtherException; - } - if (exceptionToThrow != closedByOtherException) { - this.journal.add(isRegionA ? JournalEntry.CLOSED_REGION_A - : JournalEntry.CLOSED_REGION_B); - } - if (exceptionToThrow != null) { - if (exceptionToThrow instanceof IOException) - throw (IOException) exceptionToThrow; - throw new IOException(exceptionToThrow); - } - - if (!testing) { - services.removeFromOnlineRegions(region, null); - } - this.journal.add(isRegionA ? JournalEntry.OFFLINED_REGION_A - : JournalEntry.OFFLINED_REGION_B); - return hstoreFilesToMerge; - } - - /** - * Get merged region info through the specified two regions - * @param a merging region A - * @param b merging region B - * @return the merged region info - */ - public static HRegionInfo getMergedRegionInfo(final HRegionInfo a, - final HRegionInfo b) { - long rid = EnvironmentEdgeManager.currentTime(); - // Regionid is timestamp. Merged region's id can't be less than that of - // merging regions else will insert at wrong location in hbase:meta - if (rid < a.getRegionId() || rid < b.getRegionId()) { - LOG.warn("Clock skew; merging regions id are " + a.getRegionId() - + " and " + b.getRegionId() + ", but current time here is " + rid); - rid = Math.max(a.getRegionId(), b.getRegionId()) + 1; - } - - byte[] startKey = null; - byte[] endKey = null; - // Choose the smaller as start key - if (a.compareTo(b) <= 0) { - startKey = a.getStartKey(); - } else { - startKey = b.getStartKey(); - } - // Choose the bigger as end key - if (Bytes.equals(a.getEndKey(), HConstants.EMPTY_BYTE_ARRAY) - || (!Bytes.equals(b.getEndKey(), HConstants.EMPTY_BYTE_ARRAY) - && Bytes.compareTo(a.getEndKey(), b.getEndKey()) > 0)) { - endKey = a.getEndKey(); - } else { - endKey = b.getEndKey(); - } - - // Merged region is sorted between two merging regions in META - HRegionInfo mergedRegionInfo = new HRegionInfo(a.getTable(), startKey, - endKey, false, rid); - return mergedRegionInfo; - } - - /** - * Perform time consuming opening of the merged region. - * @param server Hosting server instance. Can be null when testing - * @param services Used to online/offline regions. - * @param merged the merged region - * @throws IOException If thrown, transaction failed. Call - * {@link #rollback(Server, RegionServerServices)} - */ - void openMergedRegion(final Server server, - final RegionServerServices services, HRegion merged) throws IOException { - boolean stopped = server != null && server.isStopped(); - boolean stopping = services != null && services.isStopping(); - if (stopped || stopping) { - LOG.info("Not opening merged region " + merged.getRegionNameAsString() - + " because stopping=" + stopping + ", stopped=" + stopped); - return; - } - HRegionInfo hri = merged.getRegionInfo(); - LoggingProgressable reporter = server == null ? 
null - : new LoggingProgressable(hri, server.getConfiguration().getLong( - "hbase.regionserver.regionmerge.open.log.interval", 10000)); - merged.openHRegion(reporter); - - if (services != null) { - if (!services.reportRegionStateTransition(TransitionCode.MERGED, - mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) { - throw new IOException("Failed to report merged region to master: " - + mergedRegionInfo.getShortNameToLog()); - } - services.addToOnlineRegions(merged); - } - } - - /** - * Create reference file(s) of merging regions under the region_a merges dir - * @param hstoreFilesOfRegionA - * @param hstoreFilesOfRegionB - * @throws IOException - */ - private void mergeStoreFiles( - Map> hstoreFilesOfRegionA, - Map> hstoreFilesOfRegionB) - throws IOException { - // Create reference file(s) of region A in mergdir - HRegionFileSystem fs_a = this.region_a.getRegionFileSystem(); - for (Map.Entry> entry : hstoreFilesOfRegionA - .entrySet()) { - String familyName = Bytes.toString(entry.getKey()); - for (StoreFile storeFile : entry.getValue()) { - fs_a.mergeStoreFile(this.mergedRegionInfo, familyName, storeFile, - this.mergesdir); - } - } - // Create reference file(s) of region B in mergedir - HRegionFileSystem fs_b = this.region_b.getRegionFileSystem(); - for (Map.Entry> entry : hstoreFilesOfRegionB - .entrySet()) { - String familyName = Bytes.toString(entry.getKey()); - for (StoreFile storeFile : entry.getValue()) { - fs_b.mergeStoreFile(this.mergedRegionInfo, familyName, storeFile, - this.mergesdir); - } - } - } + Region execute(Server server, RegionServerServices services) throws IOException; /** + * Roll back a failed transaction * @param server Hosting server instance (May be null when testing). * @param services Services of regionserver, used to online regions. * @throws IOException If thrown, rollback failed. Take drastic action. @@ -547,123 +183,37 @@ public class RegionMergeTransaction { * of no return and so now need to abort the server to minimize * damage. */ - @SuppressWarnings("deprecation") - public boolean rollback(final Server server, - final RegionServerServices services) throws IOException { - assert this.mergedRegionInfo != null; - // Coprocessor callback - if (rsCoprocessorHost != null) { - rsCoprocessorHost.preRollBackMerge(this.region_a, this.region_b); - } - - boolean result = true; - ListIterator iterator = this.journal - .listIterator(this.journal.size()); - // Iterate in reverse. - while (iterator.hasPrevious()) { - JournalEntry je = iterator.previous(); - switch (je) { - - case SET_MERGING: - if (services != null - && !services.reportRegionStateTransition(TransitionCode.MERGE_REVERTED, - mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) { - return false; - } - break; - - case CREATED_MERGE_DIR: - this.region_a.writestate.writesEnabled = true; - this.region_b.writestate.writesEnabled = true; - this.region_a.getRegionFileSystem().cleanupMergesDir(); - break; - - case CLOSED_REGION_A: - try { - // So, this returns a seqid but if we just closed and then reopened, - // we should be ok. On close, we flushed using sequenceid obtained - // from hosting regionserver so no need to propagate the sequenceid - // returned out of initialize below up into regionserver as we - // normally do. 
- this.region_a.initialize(); - } catch (IOException e) { - LOG.error("Failed rollbacking CLOSED_REGION_A of region " - + this.region_a.getRegionNameAsString(), e); - throw new RuntimeException(e); - } - break; - - case OFFLINED_REGION_A: - if (services != null) - services.addToOnlineRegions(this.region_a); - break; - - case CLOSED_REGION_B: - try { - this.region_b.initialize(); - } catch (IOException e) { - LOG.error("Failed rollbacking CLOSED_REGION_A of region " - + this.region_b.getRegionNameAsString(), e); - throw new RuntimeException(e); - } - break; - - case OFFLINED_REGION_B: - if (services != null) - services.addToOnlineRegions(this.region_b); - break; - - case STARTED_MERGED_REGION_CREATION: - this.region_a.getRegionFileSystem().cleanupMergedRegion( - this.mergedRegionInfo); - break; - - case PONR: - // We got to the point-of-no-return so we need to just abort. Return - // immediately. Do not clean up created merged regions. - return false; - - default: - throw new RuntimeException("Unhandled journal entry: " + je); - } - } - // Coprocessor callback - if (rsCoprocessorHost != null) { - rsCoprocessorHost.postRollBackMerge(this.region_a, this.region_b); - } - - return result; - } - - HRegionInfo getMergedRegionInfo() { - return this.mergedRegionInfo; - } - - // For unit testing. - Path getMergesDir() { - return this.mergesdir; - } + boolean rollback(Server server, RegionServerServices services) throws IOException; /** - * Checks if the given region has merge qualifier in hbase:meta - * @param services - * @param regionName name of specified region - * @return true if the given region has merge qualifier in META.(It will be - * cleaned by CatalogJanitor) - * @throws IOException + * Register a listener for transaction preparation, execution, and possibly + * rollback phases. + *

      A listener can abort a transaction by throwing an exception. + * @param listener the listener + * @return 'this' for chaining */ - boolean hasMergeQualifierInMeta(final RegionServerServices services, - final byte[] regionName) throws IOException { - if (services == null) return false; - // Get merge regions if it is a merged region and already has merge - // qualifier - Pair mergeRegions = MetaTableAccessor - .getRegionsFromMergeQualifier(services.getConnection(), regionName); - if (mergeRegions != null && - (mergeRegions.getFirst() != null || mergeRegions.getSecond() != null)) { - // It has merge qualifier - return true; - } - return false; - } + RegionMergeTransaction registerTransactionListener(TransactionListener listener); + + /** @return merged region info */ + HRegionInfo getMergedRegionInfo(); + + /** + * Get the journal for the transaction. + *

      Journal entries are an opaque type represented as JournalEntry. They can + * also provide useful debugging information via their toString method. + * @return the transaction journal + */ + List getJournal(); + + /** + * Get the Server running the transaction or rollback + * @return server instance + */ + Server getServer(); + + /** + * Get the RegonServerServices of the server running the transaction or rollback + * @return region server services + */ + RegionServerServices getRegionServerServices(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionFactory.java new file mode 100644 index 00000000000..c844d547296 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionFactory.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.ReflectionUtils; + +/** + * A factory for creating RegionMergeTransactions, which execute region split as a "transaction". + * See {@link RegionMergeTransactionImpl} + */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving +public class RegionMergeTransactionFactory implements Configurable { + + public static final String MERGE_TRANSACTION_IMPL_KEY = + "hbase.regionserver.merge.transaction.impl"; + + private Configuration conf; + + public RegionMergeTransactionFactory(Configuration conf) { + this.conf = conf; + } + + @Override + public Configuration getConf() { + return conf; + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + /** + * Create a merge transaction + * @param a region a to merge + * @param b region b to merge + * @param forcible if false, we will only merge adjacent regions + * @return transaction instance + */ + public RegionMergeTransactionImpl create(final Region a, final Region b, + final boolean forcible) { + // The implementation class must extend RegionMergeTransactionImpl, not only + // implement the RegionMergeTransaction interface like you might expect, + // because various places such as AssignmentManager use static methods + // from RegionMergeTransactionImpl. Whatever we use for implementation must + // be compatible, so it's safest to require ? extends RegionMergeTransactionImpl. 
+ // If not compatible we will throw a runtime exception from here. + return ReflectionUtils.instantiateWithCustomCtor( + conf.getClass(MERGE_TRANSACTION_IMPL_KEY, RegionMergeTransactionImpl.class, + RegionMergeTransactionImpl.class).getName(), + new Class[] { Region.class, Region.class, boolean.class }, + new Object[] { a, b, forcible }); + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java new file mode 100644 index 00000000000..cd0542512ab --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java @@ -0,0 +1,702 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitationsME + * under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.MetaMutationAnnotation; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; +import org.apache.hadoop.hbase.regionserver.SplitTransactionImpl.LoggingProgressable; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Pair; + +import com.google.common.annotations.VisibleForTesting; + +@InterfaceAudience.Private +public class RegionMergeTransactionImpl implements RegionMergeTransaction { + private static final Log LOG = LogFactory.getLog(RegionMergeTransactionImpl.class); + + // Merged region info + private HRegionInfo mergedRegionInfo; + // region_a sorts before region_b + private final HRegion region_a; + private final HRegion region_b; + // merges dir is under region_a + private final Path mergesdir; + // We only merge adjacent regions if forcible is false + private final boolean forcible; + + /* + * Transaction state for listener, only valid during execute and + * rollback + */ + private RegionMergeTransactionPhase currentPhase = RegionMergeTransactionPhase.STARTED; + private Server server; + private 
RegionServerServices rsServices; + + public static class JournalEntryImpl implements JournalEntry { + private RegionMergeTransactionPhase type; + private long timestamp; + + public JournalEntryImpl(RegionMergeTransactionPhase type) { + this(type, EnvironmentEdgeManager.currentTime()); + } + + public JournalEntryImpl(RegionMergeTransactionPhase type, long timestamp) { + this.type = type; + this.timestamp = timestamp; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(type); + sb.append(" at "); + sb.append(timestamp); + return sb.toString(); + } + + @Override + public RegionMergeTransactionPhase getPhase() { + return type; + } + + @Override + public long getTimeStamp() { + return timestamp; + } + } + + /* + * Journal of how far the merge transaction has progressed. + */ + private final List journal = new ArrayList(); + + /** + * Listeners + */ + private final ArrayList listeners = new ArrayList(); + + private static IOException closedByOtherException = new IOException( + "Failed to close region: already closed by another thread"); + + private RegionServerCoprocessorHost rsCoprocessorHost = null; + + /** + * Constructor + * @param a region a to merge + * @param b region b to merge + * @param forcible if false, we will only merge adjacent regions + */ + public RegionMergeTransactionImpl(final Region a, final Region b, + final boolean forcible) { + if (a.getRegionInfo().compareTo(b.getRegionInfo()) <= 0) { + this.region_a = (HRegion)a; + this.region_b = (HRegion)b; + } else { + this.region_a = (HRegion)b; + this.region_b = (HRegion)a; + } + this.forcible = forcible; + this.mergesdir = region_a.getRegionFileSystem().getMergesDir(); + } + + private void transition(RegionMergeTransactionPhase nextPhase) throws IOException { + transition(nextPhase, false); + } + + private void transition(RegionMergeTransactionPhase nextPhase, boolean isRollback) + throws IOException { + if (!isRollback) { + // Add to the journal first, because if the listener throws an exception + // we need to roll back starting at 'nextPhase' + this.journal.add(new JournalEntryImpl(nextPhase)); + } + for (int i = 0; i < listeners.size(); i++) { + TransactionListener listener = listeners.get(i); + if (!isRollback) { + listener.transition(this, currentPhase, nextPhase); + } else { + listener.rollback(this, currentPhase, nextPhase); + } + } + currentPhase = nextPhase; + } + + @Override + public boolean prepare(final RegionServerServices services) throws IOException { + if (!region_a.getTableDesc().getTableName() + .equals(region_b.getTableDesc().getTableName())) { + LOG.info("Can't merge regions " + region_a + "," + region_b + + " because they do not belong to the same table"); + return false; + } + if (region_a.getRegionInfo().equals(region_b.getRegionInfo())) { + LOG.info("Can't merge the same region " + region_a); + return false; + } + if (!forcible && !HRegionInfo.areAdjacent(region_a.getRegionInfo(), + region_b.getRegionInfo())) { + String msg = "Skip merging " + region_a.getRegionInfo().getRegionNameAsString() + + " and " + region_b.getRegionInfo().getRegionNameAsString() + + ", because they are not adjacent."; + LOG.info(msg); + return false; + } + if (!this.region_a.isMergeable() || !this.region_b.isMergeable()) { + return false; + } + try { + boolean regionAHasMergeQualifier = hasMergeQualifierInMeta(services, + region_a.getRegionInfo().getRegionName()); + if (regionAHasMergeQualifier || + hasMergeQualifierInMeta(services, region_b.getRegionInfo().getRegionName())) { + 
LOG.debug("Region " + (regionAHasMergeQualifier ? + region_a.getRegionInfo().getRegionNameAsString() + : region_b.getRegionInfo().getRegionNameAsString()) + + " is not mergeable because it has merge qualifier in META"); + return false; + } + } catch (IOException e) { + LOG.warn("Failed judging whether merge transaction is available for " + + region_a.getRegionInfo().getRegionNameAsString() + " and " + + region_b.getRegionInfo().getRegionNameAsString(), e); + return false; + } + + // WARN: make sure there is no parent region of the two merging regions in + // hbase:meta If exists, fixing up daughters would cause daughter regions(we + // have merged one) online again when we restart master, so we should clear + // the parent region to prevent the above case + // Since HBASE-7721, we don't need fix up daughters any more. so here do + // nothing + + this.mergedRegionInfo = getMergedRegionInfo(region_a.getRegionInfo(), + region_b.getRegionInfo()); + + transition(RegionMergeTransactionPhase.PREPARED); + return true; + } + + @Override + public Region execute(final Server server, final RegionServerServices services) + throws IOException { + this.server = server; + this.rsServices = services; + if (rsCoprocessorHost == null) { + rsCoprocessorHost = server != null ? + ((HRegionServer) server).getRegionServerCoprocessorHost() : null; + } + HRegion mergedRegion = createMergedRegion(server, services); + if (rsCoprocessorHost != null) { + rsCoprocessorHost.postMergeCommit(this.region_a, this.region_b, mergedRegion); + } + stepsAfterPONR(server, services, mergedRegion); + + transition(RegionMergeTransactionPhase.COMPLETED); + + return mergedRegion; + } + + @VisibleForTesting + public void stepsAfterPONR(final Server server, final RegionServerServices services, + HRegion mergedRegion) throws IOException { + openMergedRegion(server, services, mergedRegion); + if (rsCoprocessorHost != null) { + rsCoprocessorHost.postMerge(this.region_a, this.region_b, mergedRegion); + } + } + + /** + * Prepare the merged region and region files. + * @param server Hosting server instance. Can be null when testing + * @param services Used to online/offline regions. + * @return merged region + * @throws IOException If thrown, transaction failed. Call + * {@link #rollback(Server, RegionServerServices)} + */ + private HRegion createMergedRegion(final Server server, final RegionServerServices services) + throws IOException { + LOG.info("Starting merge of " + region_a + " and " + + region_b.getRegionInfo().getRegionNameAsString() + ", forcible=" + forcible); + if ((server != null && server.isStopped()) + || (services != null && services.isStopping())) { + throw new IOException("Server is stopped or stopping"); + } + + if (rsCoprocessorHost != null) { + if (rsCoprocessorHost.preMerge(this.region_a, this.region_b)) { + throw new IOException("Coprocessor bypassing regions " + this.region_a + " " + + this.region_b + " merge."); + } + } + + // If true, no cluster to write meta edits to or to use coordination. + boolean testing = server == null ? 
true : server.getConfiguration() + .getBoolean("hbase.testing.nocluster", false); + + HRegion mergedRegion = stepsBeforePONR(server, services, testing); + + @MetaMutationAnnotation + List metaEntries = new ArrayList(); + if (rsCoprocessorHost != null) { + if (rsCoprocessorHost.preMergeCommit(this.region_a, this.region_b, metaEntries)) { + throw new IOException("Coprocessor bypassing regions " + this.region_a + " " + + this.region_b + " merge."); + } + try { + for (Mutation p : metaEntries) { + HRegionInfo.parseRegionName(p.getRow()); + } + } catch (IOException e) { + LOG.error("Row key of mutation from coprocessor is not parsable as region name." + + "Mutations from coprocessor should only be for hbase:meta table.", e); + throw e; + } + } + + // This is the point of no return. Similar with SplitTransaction. + // IF we reach the PONR then subsequent failures need to crash out this + // regionserver + transition(RegionMergeTransactionPhase.PONR); + + // Add merged region and delete region_a and region_b + // as an atomic update. See HBASE-7721. This update to hbase:meta makes the region + // will determine whether the region is merged or not in case of failures. + // If it is successful, master will roll-forward, if not, master will + // rollback + if (services != null && !services.reportRegionStateTransition(TransitionCode.MERGE_PONR, + mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) { + // Passed PONR, let SSH clean it up + throw new IOException("Failed to notify master that merge passed PONR: " + + region_a.getRegionInfo().getRegionNameAsString() + " and " + + region_b.getRegionInfo().getRegionNameAsString()); + } + return mergedRegion; + } + + @VisibleForTesting + public void prepareMutationsForMerge(HRegionInfo mergedRegion, HRegionInfo regionA, + HRegionInfo regionB, ServerName serverName, List mutations) throws IOException { + HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion); + + // Put for parent + Put putOfMerged = MetaTableAccessor.makePutFromRegionInfo(copyOfMerged); + putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER, + regionA.toByteArray()); + putOfMerged.add(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER, + regionB.toByteArray()); + mutations.add(putOfMerged); + // Deletes for merging regions + Delete deleteA = MetaTableAccessor.makeDeleteFromRegionInfo(regionA); + Delete deleteB = MetaTableAccessor.makeDeleteFromRegionInfo(regionB); + mutations.add(deleteA); + mutations.add(deleteB); + // The merged is a new region, openSeqNum = 1 is fine. 
+ addLocation(putOfMerged, serverName, 1); + } + + @VisibleForTesting + Put addLocation(final Put p, final ServerName sn, long openSeqNum) { + p.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes + .toBytes(sn.getHostAndPort())); + p.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn + .getStartcode())); + p.add(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER, Bytes.toBytes(openSeqNum)); + return p; + } + + @VisibleForTesting + public HRegion stepsBeforePONR(final Server server, final RegionServerServices services, + boolean testing) throws IOException { + if (services != null && !services.reportRegionStateTransition(TransitionCode.READY_TO_MERGE, + mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) { + throw new IOException("Failed to get ok from master to merge " + + region_a.getRegionInfo().getRegionNameAsString() + " and " + + region_b.getRegionInfo().getRegionNameAsString()); + } + + transition(RegionMergeTransactionPhase.SET_MERGING); + + this.region_a.getRegionFileSystem().createMergesDir(); + + transition(RegionMergeTransactionPhase.CREATED_MERGE_DIR); + + Map> hstoreFilesOfRegionA = closeAndOfflineRegion( + services, this.region_a, true, testing); + Map> hstoreFilesOfRegionB = closeAndOfflineRegion( + services, this.region_b, false, testing); + + assert hstoreFilesOfRegionA != null && hstoreFilesOfRegionB != null; + + // mergeStoreFiles creates merged region dirs under the region_a merges dir + // Nothing to unroll here if failure -- clean up of CREATE_MERGE_DIR will + // clean this up. + mergeStoreFiles(hstoreFilesOfRegionA, hstoreFilesOfRegionB); + + // Log to the journal that we are creating merged region. We could fail + // halfway through. If we do, we could have left + // stuff in fs that needs cleanup -- a storefile or two. Thats why we + // add entry to journal BEFORE rather than AFTER the change. + + transition(RegionMergeTransactionPhase.STARTED_MERGED_REGION_CREATION); + + HRegion mergedRegion = createMergedRegionFromMerges(this.region_a, + this.region_b, this.mergedRegionInfo); + return mergedRegion; + } + + /** + * Create a merged region from the merges directory under region a. In order + * to mock it for tests, place it with a new method. + * @param a hri of region a + * @param b hri of region b + * @param mergedRegion hri of merged region + * @return merged HRegion. + * @throws IOException + */ + @VisibleForTesting + HRegion createMergedRegionFromMerges(final HRegion a, final HRegion b, + final HRegionInfo mergedRegion) throws IOException { + return a.createMergedRegionFromMerges(mergedRegion, b); + } + + /** + * Close the merging region and offline it in regionserver + * @param services + * @param region + * @param isRegionA true if it is merging region a, false if it is region b + * @param testing true if it is testing + * @return a map of family name to list of store files + * @throws IOException + */ + private Map> closeAndOfflineRegion( + final RegionServerServices services, final HRegion region, + final boolean isRegionA, final boolean testing) throws IOException { + Map> hstoreFilesToMerge = null; + Exception exceptionToThrow = null; + try { + hstoreFilesToMerge = region.close(false); + } catch (Exception e) { + exceptionToThrow = e; + } + if (exceptionToThrow == null && hstoreFilesToMerge == null) { + // The region was closed by a concurrent thread. We can't continue + // with the merge, instead we must just abandon the merge. 
If we + // reopen or merge this could cause problems because the region has + // probably already been moved to a different server, or is in the + // process of moving to a different server. + exceptionToThrow = closedByOtherException; + } + if (exceptionToThrow != closedByOtherException) { + transition(isRegionA ? RegionMergeTransactionPhase.CLOSED_REGION_A + : RegionMergeTransactionPhase.CLOSED_REGION_B); + } + if (exceptionToThrow != null) { + if (exceptionToThrow instanceof IOException) + throw (IOException) exceptionToThrow; + throw new IOException(exceptionToThrow); + } + if (!testing) { + services.removeFromOnlineRegions(region, null); + } + + transition(isRegionA ? RegionMergeTransactionPhase.OFFLINED_REGION_A + : RegionMergeTransactionPhase.OFFLINED_REGION_B); + + return hstoreFilesToMerge; + } + + /** + * Get merged region info through the specified two regions + * @param a merging region A + * @param b merging region B + * @return the merged region info + */ + @VisibleForTesting + static HRegionInfo getMergedRegionInfo(final HRegionInfo a, final HRegionInfo b) { + long rid = EnvironmentEdgeManager.currentTime(); + // Regionid is timestamp. Merged region's id can't be less than that of + // merging regions else will insert at wrong location in hbase:meta + if (rid < a.getRegionId() || rid < b.getRegionId()) { + LOG.warn("Clock skew; merging regions id are " + a.getRegionId() + + " and " + b.getRegionId() + ", but current time here is " + rid); + rid = Math.max(a.getRegionId(), b.getRegionId()) + 1; + } + + byte[] startKey = null; + byte[] endKey = null; + // Choose the smaller as start key + if (a.compareTo(b) <= 0) { + startKey = a.getStartKey(); + } else { + startKey = b.getStartKey(); + } + // Choose the bigger as end key + if (Bytes.equals(a.getEndKey(), HConstants.EMPTY_BYTE_ARRAY) + || (!Bytes.equals(b.getEndKey(), HConstants.EMPTY_BYTE_ARRAY) + && Bytes.compareTo(a.getEndKey(), b.getEndKey()) > 0)) { + endKey = a.getEndKey(); + } else { + endKey = b.getEndKey(); + } + + // Merged region is sorted between two merging regions in META + HRegionInfo mergedRegionInfo = new HRegionInfo(a.getTable(), startKey, + endKey, false, rid); + return mergedRegionInfo; + } + + /** + * Perform time consuming opening of the merged region. + * @param server Hosting server instance. Can be null when testing + * @param services Used to online/offline regions. + * @param merged the merged region + * @throws IOException If thrown, transaction failed. Call + * {@link #rollback(Server, RegionServerServices)} + */ + @VisibleForTesting + void openMergedRegion(final Server server, final RegionServerServices services, + HRegion merged) throws IOException { + boolean stopped = server != null && server.isStopped(); + boolean stopping = services != null && services.isStopping(); + if (stopped || stopping) { + LOG.info("Not opening merged region " + merged.getRegionInfo().getRegionNameAsString() + + " because stopping=" + stopping + ", stopped=" + stopped); + return; + } + HRegionInfo hri = merged.getRegionInfo(); + LoggingProgressable reporter = server == null ? 
null + : new LoggingProgressable(hri, server.getConfiguration().getLong( + "hbase.regionserver.regionmerge.open.log.interval", 10000)); + merged.openHRegion(reporter); + + if (services != null) { + if (!services.reportRegionStateTransition(TransitionCode.MERGED, + mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) { + throw new IOException("Failed to report merged region to master: " + + mergedRegionInfo.getShortNameToLog()); + } + services.addToOnlineRegions(merged); + } + } + + /** + * Create reference file(s) of merging regions under the region_a merges dir + * @param hstoreFilesOfRegionA + * @param hstoreFilesOfRegionB + * @throws IOException + */ + private void mergeStoreFiles( + Map> hstoreFilesOfRegionA, + Map> hstoreFilesOfRegionB) + throws IOException { + // Create reference file(s) of region A in mergdir + HRegionFileSystem fs_a = this.region_a.getRegionFileSystem(); + for (Map.Entry> entry : hstoreFilesOfRegionA + .entrySet()) { + String familyName = Bytes.toString(entry.getKey()); + for (StoreFile storeFile : entry.getValue()) { + fs_a.mergeStoreFile(this.mergedRegionInfo, familyName, storeFile, + this.mergesdir); + } + } + // Create reference file(s) of region B in mergedir + HRegionFileSystem fs_b = this.region_b.getRegionFileSystem(); + for (Map.Entry> entry : hstoreFilesOfRegionB + .entrySet()) { + String familyName = Bytes.toString(entry.getKey()); + for (StoreFile storeFile : entry.getValue()) { + fs_b.mergeStoreFile(this.mergedRegionInfo, familyName, storeFile, + this.mergesdir); + } + } + } + + @Override + public boolean rollback(final Server server, + final RegionServerServices services) throws IOException { + assert this.mergedRegionInfo != null; + this.server = server; + this.rsServices = services; + // Coprocessor callback + if (rsCoprocessorHost != null) { + rsCoprocessorHost.preRollBackMerge(this.region_a, this.region_b); + } + + boolean result = true; + ListIterator iterator = this.journal + .listIterator(this.journal.size()); + // Iterate in reverse. + while (iterator.hasPrevious()) { + JournalEntry je = iterator.previous(); + + transition(je.getPhase(), true); + + switch (je.getPhase()) { + + case SET_MERGING: + if (services != null + && !services.reportRegionStateTransition(TransitionCode.MERGE_REVERTED, + mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) { + return false; + } + break; + + case CREATED_MERGE_DIR: + this.region_a.writestate.writesEnabled = true; + this.region_b.writestate.writesEnabled = true; + this.region_a.getRegionFileSystem().cleanupMergesDir(); + break; + + case CLOSED_REGION_A: + try { + // So, this returns a seqid but if we just closed and then reopened, + // we should be ok. On close, we flushed using sequenceid obtained + // from hosting regionserver so no need to propagate the sequenceid + // returned out of initialize below up into regionserver as we + // normally do. 
+ this.region_a.initialize(); + } catch (IOException e) { + LOG.error("Failed rollbacking CLOSED_REGION_A of region " + + region_a.getRegionInfo().getRegionNameAsString(), e); + throw new RuntimeException(e); + } + break; + + case OFFLINED_REGION_A: + if (services != null) + services.addToOnlineRegions(this.region_a); + break; + + case CLOSED_REGION_B: + try { + this.region_b.initialize(); + } catch (IOException e) { + LOG.error("Failed rollbacking CLOSED_REGION_A of region " + + region_b.getRegionInfo().getRegionNameAsString(), e); + throw new RuntimeException(e); + } + break; + + case OFFLINED_REGION_B: + if (services != null) + services.addToOnlineRegions(this.region_b); + break; + + case STARTED_MERGED_REGION_CREATION: + this.region_a.getRegionFileSystem().cleanupMergedRegion( + this.mergedRegionInfo); + break; + + case PONR: + // We got to the point-of-no-return so we need to just abort. Return + // immediately. Do not clean up created merged regions. + return false; + + // Informational states only + case STARTED: + case PREPARED: + case COMPLETED: + break; + + default: + throw new RuntimeException("Unhandled journal entry: " + je); + } + } + // Coprocessor callback + if (rsCoprocessorHost != null) { + rsCoprocessorHost.postRollBackMerge(this.region_a, this.region_b); + } + + return result; + } + + @Override + public HRegionInfo getMergedRegionInfo() { + return this.mergedRegionInfo; + } + + @VisibleForTesting + Path getMergesDir() { + return this.mergesdir; + } + + /** + * Checks if the given region has merge qualifier in hbase:meta + * @param services + * @param regionName name of specified region + * @return true if the given region has merge qualifier in META.(It will be + * cleaned by CatalogJanitor) + * @throws IOException + */ + @VisibleForTesting + boolean hasMergeQualifierInMeta(final RegionServerServices services, final byte[] regionName) + throws IOException { + if (services == null) return false; + // Get merge regions if it is a merged region and already has merge + // qualifier + Pair mergeRegions = MetaTableAccessor + .getRegionsFromMergeQualifier(services.getConnection(), regionName); + if (mergeRegions != null && + (mergeRegions.getFirst() != null || mergeRegions.getSecond() != null)) { + // It has merge qualifier + return true; + } + return false; + } + + @Override + public List getJournal() { + return journal; + } + + @Override + public RegionMergeTransaction registerTransactionListener(TransactionListener listener) { + listeners.add(listener); + return this; + } + + @Override + public Server getServer() { + return server; + } + + @Override + public RegionServerServices getRegionServerServices() { + return rsServices; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java index ec68dc74806..66e087bfd58 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java @@ -21,17 +21,17 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.List; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import 
org.apache.hadoop.hbase.classification.InterfaceStability; /** * RegionScanner describes iterators over rows in an HRegion. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) -@InterfaceStability.Stable +@InterfaceStability.Evolving public interface RegionScanner extends InternalScanner { /** * @return The RegionInfo for this scanner. @@ -68,25 +68,30 @@ public interface RegionScanner extends InternalScanner { long getMvccReadPoint(); /** - * Grab the next row's worth of values with the default limit on the number of values - * to return. - * This is a special internal method to be called from coprocessor hooks to avoid expensive setup. - * Caller must set the thread's readpoint, start and close a region operation, an synchronize on the scanner object. - * Caller should maintain and update metrics. - * See {@link #nextRaw(List, int)} + * @return The limit on the number of cells to retrieve on each call to next(). See + * {@link org.apache.hadoop.hbase.client.Scan#setBatch(int)} + */ + int getBatch(); + + /** + * Grab the next row's worth of values. This is a special internal method to be called from + * coprocessor hooks to avoid expensive setup. Caller must set the thread's readpoint, start and + * close a region operation, and synchronize on the scanner object. Caller should maintain and + * update metrics. See {@link #nextRaw(List, ScannerContext)} + * @param result return output array + * @return true if more rows exist after this one, false if scanner is done + * @throws IOException e + */ + boolean nextRaw(List result) throws IOException; - + /** - * Grab the next row's worth of values with a limit on the number of values - * to return. - * This is a special internal method to be called from coprocessor hooks to avoid expensive setup. - * Caller must set the thread's readpoint, start and close a region operation, an synchronize on the scanner object. - * Example: - *

      +   * Grab the next row's worth of values. The {@link ScannerContext} is used to enforce and track
      +   * any limits associated with this call. Any progress that exists in the {@link ScannerContext}
      +   * prior to calling this method will be LOST if {@link ScannerContext#getKeepProgress()} is false.
      +   * Upon returning from this method, the {@link ScannerContext} will contain information about the
      +   * progress made towards the limits. This is a special internal method to be called from
      +   * coprocessor hooks to avoid expensive setup. Caller must set the thread's readpoint, start and
      +   * close a region operation, and synchronize on the scanner object. Example: 
          * HRegion region = ...;
          * RegionScanner scanner = ...
          * MultiVersionConsistencyControl.setThreadReadPoint(scanner.getMvccReadPoint());
      @@ -102,9 +107,12 @@ public interface RegionScanner extends InternalScanner {
          * }
          * 
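The javadoc example above is split across the hunk boundary, so it is worth spelling out how a coprocessor-side caller might drive the new ScannerContext-based nextRaw. The sketch below is illustrative only: region and scanner are assumed to be set up as in the javadoc example, startRegionOperation()/closeRegionOperation() are the bookkeeping calls that example alludes to, and imports (Cell, ArrayList) are elided.

    // Hedged sketch, not part of the patch: batch limit of 5 cells per nextRaw() call.
    ScannerContext context = ScannerContext.newBuilder().setBatchLimit(5).build();
    List<Cell> results = new ArrayList<Cell>();
    region.startRegionOperation();   // assumed HRegion helper, per the javadoc example above
    try {
      synchronized (scanner) {
        boolean moreRows;
        do {
          moreRows = scanner.nextRaw(results, context);
          // ... consume the cells gathered for this row ...
          results.clear();
        } while (moreRows);
      }
    } finally {
      region.closeRegionOperation(); // assumed counterpart of startRegionOperation()
    }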
      * @param result return output array - * @param limit limit on row count to get + * @param scannerContext The {@link ScannerContext} instance encapsulating all limits that should + * be tracked during calls to this method. The progress towards these limits can be + * tracked within this instance. * @return true if more rows exist after this one, false if scanner is done * @throws IOException e */ - boolean nextRaw(List result, int limit) throws IOException; + boolean nextRaw(List result, ScannerContext scannerContext) + throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java index 43a3f32757a..ab8e948b35d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerCoprocessorHost.java @@ -22,6 +22,8 @@ import java.io.IOException; import java.util.Comparator; import java.util.List; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -44,6 +46,8 @@ import org.apache.hadoop.hbase.replication.ReplicationEndpoint; public class RegionServerCoprocessorHost extends CoprocessorHost { + private static final Log LOG = LogFactory.getLog(RegionServerCoprocessorHost.class); + private RegionServerServices rsServices; public RegionServerCoprocessorHost(RegionServerServices rsServices, @@ -51,7 +55,16 @@ public class RegionServerCoprocessorHost extends super(rsServices); this.rsServices = rsServices; this.conf = conf; - // load system default cp's from configuration. + // Log the state of coprocessor loading here; should appear only once or + // twice in the daemon log, depending on HBase version, because there is + // only one RegionServerCoprocessorHost instance in the RS process + boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, + DEFAULT_COPROCESSORS_ENABLED); + boolean tableCoprocessorsEnabled = conf.getBoolean(USER_COPROCESSORS_ENABLED_CONF_KEY, + DEFAULT_USER_COPROCESSORS_ENABLED); + LOG.info("System coprocessor loading is " + (coprocessorsEnabled ? "enabled" : "disabled")); + LOG.info("Table coprocessor loading is " + + ((coprocessorsEnabled && tableCoprocessorsEnabled) ? 
"enabled" : "disabled")); loadSystemCoprocessors(conf, REGIONSERVER_COPROCESSOR_CONF_KEY); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java index c75ed53dadd..eaffa380e08 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java @@ -24,9 +24,11 @@ import java.util.Set; import java.util.concurrent.ConcurrentMap; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.master.TableLockManager; @@ -40,9 +42,9 @@ import com.google.protobuf.Service; /** * Services provided by {@link HRegionServer} */ -@InterfaceAudience.Private -public interface RegionServerServices - extends OnlineRegions, FavoredNodesForRegion { +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving +public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegion { /** * @return True if this regionserver is stopping. */ @@ -85,8 +87,7 @@ public interface RegionServerServices * @throws KeeperException * @throws IOException */ - void postOpenDeployTasks(final HRegion r) - throws KeeperException, IOException; + void postOpenDeployTasks(final Region r) throws KeeperException, IOException; /** * Notify master that a handler requests to change a region state @@ -127,7 +128,7 @@ public interface RegionServerServices /** * @return set of recovering regions on the hosting region server */ - Map getRecoveringRegions(); + Map getRecoveringRegions(); /** * Only required for "old" log replay; if it's removed, remove this. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java index ec7f9fe02ac..0e28ebbfafb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; -import java.util.Map; +import java.util.List; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -74,11 +74,11 @@ public abstract class RegionSplitPolicy extends Configured { if (explicitSplitPoint != null) { return explicitSplitPoint; } - Map stores = region.getStores(); + List stores = region.getStores(); byte[] splitPointFromLargestStore = null; long largestStoreSize = 0; - for (Store s : stores.values()) { + for (Store s : stores) { byte[] splitPoint = s.getSplitPoint(); long storeSize = s.getSize(); if (splitPoint != null && largestStoreSize < storeSize) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java index 92ac8236c72..25a27a90534 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReplicationService.java @@ -22,11 +22,12 @@ import java.io.IOException; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.replication.regionserver.ReplicationLoad; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; /** - * Gateway to Cluster Replication. + * Gateway to Cluster Replication. * Used by {@link org.apache.hadoop.hbase.regionserver.HRegionServer}. * One such application is a cross-datacenter * replication service that can keep two hbase clusters in sync. @@ -52,4 +53,9 @@ public interface ReplicationService { * Stops replication service. */ void stopReplicationService(); + + /** + * Refresh and Get ReplicationLoad + */ + public ReplicationLoad refreshAndGetReplicationLoad(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java index 4c46218d374..85be382c802 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedMobStoreScanner.java @@ -54,8 +54,8 @@ public class ReversedMobStoreScanner extends ReversedStoreScanner { * from the mob file as the result. 
*/ @Override - public boolean next(List outResult, int limit) throws IOException { - boolean result = super.next(outResult, limit); + public boolean next(List outResult, ScannerContext ctx) throws IOException { + boolean result = super.next(outResult, ctx); if (!MobUtils.isRawMobScan(scan)) { // retrieve the mob data if (outResult.isEmpty()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java index c46da2a33a0..032b4ce1c5c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java @@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; import java.util.NavigableSet; +import org.apache.hadoop.hbase.KeyValue.Type; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -204,9 +205,8 @@ public class ScanQueryMatcher { // We can share the ExplicitColumnTracker, diff is we reset // between rows, not between storefiles. - byte[] attr = scan.getAttribute(Scan.HINT_LOOKAHEAD); this.columns = new ExplicitColumnTracker(columns, scanInfo.getMinVersions(), maxVersions, - oldestUnexpiredTS, attr == null ? 0 : Bytes.toInt(attr)); + oldestUnexpiredTS); } this.isReversed = scan.isReversed(); } @@ -233,8 +233,9 @@ public class ScanQueryMatcher { * @throws IOException */ public ScanQueryMatcher(Scan scan, ScanInfo scanInfo, NavigableSet columns, - long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, long now, byte[] dropDeletesFromRow, - byte[] dropDeletesToRow, RegionCoprocessorHost regionCoprocessorHost) throws IOException { + long readPointToUse, long earliestPutTs, long oldestUnexpiredTS, long now, + byte[] dropDeletesFromRow, byte[] dropDeletesToRow, + RegionCoprocessorHost regionCoprocessorHost) throws IOException { this(scan, scanInfo, columns, ScanType.COMPACT_RETAIN_DELETES, readPointToUse, earliestPutTs, oldestUnexpiredTS, now, regionCoprocessorHost); Preconditions.checkArgument((dropDeletesFromRow != null) && (dropDeletesToRow != null)); @@ -577,6 +578,41 @@ public class ScanQueryMatcher { null, 0, 0); } + /** + * @param nextIndexed the key of the next entry in the block index (if any) + * @param kv The Cell we're using to calculate the seek key + * @return result of the compare between the indexed key and the key portion of the passed cell + */ + public int compareKeyForNextRow(Cell nextIndexed, Cell kv) { + return rowComparator.compareKey(nextIndexed, + kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), + null, 0, 0, + null, 0, 0, + HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); + } + + /** + * @param nextIndexed the key of the next entry in the block index (if any) + * @param kv The Cell we're using to calculate the seek key + * @return result of the compare between the indexed key and the key portion of the passed cell + */ + public int compareKeyForNextColumn(Cell nextIndexed, Cell kv) { + ColumnCount nextColumn = columns.getColumnHint(); + if (nextColumn == null) { + return rowComparator.compareKey(nextIndexed, + kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), + kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), + kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), + HConstants.OLDEST_TIMESTAMP, Type.Minimum.getCode()); + } else { + 
return rowComparator.compareKey(nextIndexed, + kv.getRowArray(), kv.getRowOffset(), kv.getRowLength(), + kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), + nextColumn.getBuffer(), nextColumn.getOffset(), nextColumn.getLength(), + HConstants.LATEST_TIMESTAMP, Type.Maximum.getCode()); + } + } + //Used only for testing purposes static MatchCode checkColumn(ColumnTracker columnTracker, byte[] bytes, int offset, int length, long ttl, byte type, boolean ignoreCount) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java new file mode 100644 index 00000000000..6e487ca7a0c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java @@ -0,0 +1,527 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * ScannerContext instances encapsulate limit tracking AND progress towards those limits during + * invocations of {@link InternalScanner#next(java.util.List)} and + * {@link RegionScanner#next(java.util.List)}. + *

      + * A ScannerContext instance should be updated periodically throughout execution whenever progress + * towards a limit has been made. Each limit can be checked via the appropriate checkLimit method. + *

      + * Once a limit has been reached, the scan will stop. The invoker of + * {@link InternalScanner#next(java.util.List)} or {@link RegionScanner#next(java.util.List)} can + * use the appropriate check*Limit methods to see exactly which limits have been reached. + * Alternatively, {@link #checkAnyLimitReached(LimitScope)} is provided to see if ANY limit was + * reached. + *

      + * {@link NoLimitScannerContext#NO_LIMIT} is an immutable static definition that can be used + * whenever a {@link ScannerContext} is needed but limits do not need to be enforced. + *

      + * NOTE: It is important that this class only ever expose setter methods that can be safely skipped + * when limits should be NOT enforced. This is because of the necessary immutability of the class + * {@link NoLimitScannerContext}. If a setter cannot be safely skipped, the immutable nature of + * {@link NoLimitScannerContext} will lead to incorrect behavior. + */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving +public class ScannerContext { + private final Log LOG = LogFactory.getLog(this.getClass()); + + /** + * Two sets of the same fields. One for the limits, another for the progress towards those limits + */ + LimitFields limits; + LimitFields progress; + + /** + * The state of the scanner after the invocation of {@link InternalScanner#next(java.util.List)} + * or {@link RegionScanner#next(java.util.List)}. + */ + NextState scannerState; + private static final NextState DEFAULT_STATE = NextState.MORE_VALUES; + + /** + * Used as an indication to invocations of {@link InternalScanner#next(java.util.List)} and + * {@link RegionScanner#next(java.util.List)} that, if true, the progress tracked within this + * {@link ScannerContext} instance should be considered while evaluating the limits. Useful for + * enforcing a set of limits across multiple calls (i.e. the limit may not be reached in a single + * invocation, but any progress made should be considered in future invocations) + *

      + * Defaulting this value to false means that, by default, any tracked progress will be wiped clean + * on invocations to {@link InternalScanner#next(java.util.List)} and + * {@link RegionScanner#next(java.util.List)} and the call will be treated as though no progress + * has been made towards the limits so far. + *

      + * This is an important mechanism. Users of Internal/Region scanners expect that they can define + * some limits and then repeatedly invoke {@link InternalScanner#next(List)} or + * {@link RegionScanner#next(List)} where each invocation respects these limits separately. + *

      + * For example:

      +   * ScannerContext context = ScannerContext.newBuilder().setBatchLimit(5).build();
      +   * RegionScanner scanner = ...
      +   * List results = new ArrayList();
      +   * while(scanner.next(results, context)) {
      +   *   // Do something with a batch of 5 cells
      +   * }
      +   * 
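The example above shows a per-call batch limit; the paragraph that follows contrasts that with limits kept across calls via keepProgress. A hedged sketch of both styles, using only the Builder methods added in this patch (the 2 MB size figure is arbitrary):

    // Per-call limits (keepProgress defaults to false): tracked progress is cleared on
    // every next()/nextRaw() invocation, so the batch limit applies to each call separately.
    ScannerContext perCall = ScannerContext.newBuilder().setBatchLimit(5).build();

    // RPC-style limits (keepProgress = true): size progress accumulated by earlier
    // invocations still counts toward the limit in later ones.
    ScannerContext acrossCalls = ScannerContext.newBuilder(true)
        .setSizeLimit(ScannerContext.LimitScope.BETWEEN_ROWS, 2L * 1024 * 1024)
        .setBatchLimit(5)
        .build();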
      However, in the case of RPCs, the server wants to be able to define a set of + * limits for a particular RPC request and have those limits respected across multiple + * invocations. This means that the progress made towards the limits in earlier calls will be + * saved and considered in future invocations + */ + boolean keepProgress; + private static boolean DEFAULT_KEEP_PROGRESS = false; + + ScannerContext(boolean keepProgress, LimitFields limitsToCopy) { + this.limits = new LimitFields(); + if (limitsToCopy != null) this.limits.copy(limitsToCopy); + + // Progress fields are initialized to 0 + progress = new LimitFields(0, LimitFields.DEFAULT_SCOPE, 0); + + this.keepProgress = keepProgress; + this.scannerState = DEFAULT_STATE; + } + + /** + * @return true if the progress tracked so far in this instance will be considered during an + * invocation of {@link InternalScanner#next(java.util.List)} or + * {@link RegionScanner#next(java.util.List)}. false when the progress tracked so far + * should not be considered and should instead be wiped away via {@link #clearProgress()} + */ + boolean getKeepProgress() { + return keepProgress; + } + + void setKeepProgress(boolean keepProgress) { + this.keepProgress = keepProgress; + } + + /** + * Progress towards the batch limit has been made. Increment internal tracking of batch progress + */ + void incrementBatchProgress(int batch) { + int currentBatch = progress.getBatch(); + progress.setBatch(currentBatch + batch); + } + + /** + * Progress towards the size limit has been made. Increment internal tracking of size progress + */ + void incrementSizeProgress(long size) { + long currentSize = progress.getSize(); + progress.setSize(currentSize + size); + } + + int getBatchProgress() { + return progress.getBatch(); + } + + long getSizeProgress() { + return progress.getSize(); + } + + void setProgress(int batchProgress, long sizeProgress) { + setBatchProgress(batchProgress); + setSizeProgress(sizeProgress); + } + + void setSizeProgress(long sizeProgress) { + progress.setSize(sizeProgress); + } + + void setBatchProgress(int batchProgress) { + progress.setBatch(batchProgress); + } + + /** + * Clear away any progress that has been made so far. All progress fields are reset to initial + * values + */ + void clearProgress() { + progress.setFields(0, LimitFields.DEFAULT_SCOPE, 0); + } + + /** + * Note that this is not a typical setter. This setter returns the {@link NextState} that was + * passed in so that methods can be invoked against the new state. Furthermore, this pattern + * allows the {@link NoLimitScannerContext} to cleanly override this setter and simply return the + * new state, thus preserving the immutability of {@link NoLimitScannerContext} + * @param state + * @return The state that + */ + NextState setScannerState(NextState state) { + if (!NextState.isValidState(state)) { + throw new IllegalArgumentException("Cannot set to invalid state: " + state); + } + + this.scannerState = state; + return state; + } + + /** + * @return true when a partial result is formed. A partial result is formed when a limit is + * reached in the middle of a row. 
+ */ + boolean partialResultFormed() { + return scannerState == NextState.SIZE_LIMIT_REACHED_MID_ROW; + } + + /** + * @param checkerScope + * @return true if the batch limit can be enforced in the checker's scope + */ + boolean hasBatchLimit(LimitScope checkerScope) { + return limits.canEnforceBatchLimitFromScope(checkerScope) && limits.getBatch() > 0; + } + + /** + * @param checkerScope + * @return true if the size limit can be enforced in the checker's scope + */ + boolean hasSizeLimit(LimitScope checkerScope) { + return limits.canEnforceSizeLimitFromScope(checkerScope) && limits.getSize() > 0; + } + + /** + * @param checkerScope + * @return true if any limit can be enforced within the checker's scope + */ + boolean hasAnyLimit(LimitScope checkerScope) { + return hasBatchLimit(checkerScope) || hasSizeLimit(checkerScope); + } + + /** + * @param scope The scope in which the size limit will be enforced + */ + void setSizeLimitScope(LimitScope scope) { + limits.setSizeScope(scope); + } + + int getBatchLimit() { + return limits.getBatch(); + } + + long getSizeLimit() { + return limits.getSize(); + } + + /** + * @param checkerScope The scope that the limit is being checked from + * @return true when the limit is enforceable from the checker's scope and it has been reached + */ + boolean checkBatchLimit(LimitScope checkerScope) { + return hasBatchLimit(checkerScope) && progress.getBatch() >= limits.getBatch(); + } + + /** + * @param checkerScope The scope that the limit is being checked from + * @return true when the limit is enforceable from the checker's scope and it has been reached + */ + boolean checkSizeLimit(LimitScope checkerScope) { + return hasSizeLimit(checkerScope) && progress.getSize() >= limits.getSize(); + } + + /** + * @param checkerScope The scope that the limits are being checked from + * @return true when some limit is enforceable from the checker's scope and it has been reached + */ + boolean checkAnyLimitReached(LimitScope checkerScope) { + return checkSizeLimit(checkerScope) || checkBatchLimit(checkerScope); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("{"); + + sb.append("limits:"); + sb.append(limits); + + sb.append(", progress:"); + sb.append(progress); + + sb.append(", keepProgress:"); + sb.append(keepProgress); + + sb.append(", state:"); + sb.append(scannerState); + + sb.append("}"); + return sb.toString(); + } + + public static Builder newBuilder() { + return new Builder(); + } + + public static Builder newBuilder(boolean keepProgress) { + return new Builder(keepProgress); + } + + public static final class Builder { + boolean keepProgress = DEFAULT_KEEP_PROGRESS; + LimitFields limits = new LimitFields(); + + private Builder() { + } + + private Builder(boolean keepProgress) { + this.keepProgress = keepProgress; + } + + public Builder setKeepProgress(boolean keepProgress) { + this.keepProgress = keepProgress; + return this; + } + + public Builder setSizeLimit(LimitScope sizeScope, long sizeLimit) { + limits.setSize(sizeLimit); + limits.setSizeScope(sizeScope); + return this; + } + + public Builder setBatchLimit(int batchLimit) { + limits.setBatch(batchLimit); + return this; + } + + public ScannerContext build() { + return new ScannerContext(keepProgress, limits); + } + } + + /** + * The possible states a scanner may be in following a call to {@link InternalScanner#next(List)} + */ + public enum NextState { + MORE_VALUES(true, false), + NO_MORE_VALUES(false, false), + SIZE_LIMIT_REACHED(true, true), + + /** + * 
Special case of size limit reached to indicate that the size limit was reached in the middle + * of a row and thus a partial results was formed + */ + SIZE_LIMIT_REACHED_MID_ROW(true, true), + BATCH_LIMIT_REACHED(true, true); + + private boolean moreValues; + private boolean limitReached; + + private NextState(boolean moreValues, boolean limitReached) { + this.moreValues = moreValues; + this.limitReached = limitReached; + } + + /** + * @return true when the state indicates that more values may follow those that have been + * returned + */ + public boolean hasMoreValues() { + return this.moreValues; + } + + /** + * @return true when the state indicates that a limit has been reached and scan should stop + */ + public boolean limitReached() { + return this.limitReached; + } + + public static boolean isValidState(NextState state) { + return state != null; + } + + public static boolean hasMoreValues(NextState state) { + return isValidState(state) && state.hasMoreValues(); + } + } + + /** + * The various scopes where a limit can be enforced. Used to differentiate when a limit should be + * enforced or not. + */ + public enum LimitScope { + /** + * Enforcing a limit between rows means that the limit will not be considered until all the + * cells for a particular row have been retrieved + */ + BETWEEN_ROWS(0), + + /** + * Enforcing a limit between cells means that the limit will be considered after each full cell + * has been retrieved + */ + BETWEEN_CELLS(1); + + /** + * When enforcing a limit, we must check that the scope is appropriate for enforcement. + *

      + * To communicate this concept, each scope has a depth. A limit will be enforced if the depth of + * the checker's scope is less than or equal to the limit's scope. This means that when checking + * limits, the checker must know their own scope (i.e. are they checking the limits between + * rows, between cells, etc...) + */ + int depth; + + LimitScope(int depth) { + this.depth = depth; + } + + int depth() { + return depth; + } + + /** + * @param checkerScope The scope in which the limit is being checked + * @return true when the checker is in a scope that indicates the limit can be enforced. Limits + * can be enforced from "higher or equal" scopes (i.e. the checker's scope is at a + * lesser depth than the limit) + */ + boolean canEnforceLimitFromScope(LimitScope checkerScope) { + return checkerScope != null && checkerScope.depth() <= depth; + } + } + + /** + * The different fields that can be used as limits in calls to + * {@link InternalScanner#next(java.util.List)} and {@link RegionScanner#next(java.util.List)} + */ + private static class LimitFields { + /** + * Default values of the limit fields. Defined such that if a field does NOT change from its + * default, it will not be enforced + */ + private static int DEFAULT_BATCH = -1; + private static long DEFAULT_SIZE = -1L; + + /** + * Default scope that is assigned to a limit if a scope is not specified. + */ + private static final LimitScope DEFAULT_SCOPE = LimitScope.BETWEEN_ROWS; + + // The batch limit will always be enforced between cells, thus, there isn't a field to hold the + // batch scope + int batch = DEFAULT_BATCH; + + LimitScope sizeScope = DEFAULT_SCOPE; + long size = DEFAULT_SIZE; + + /** + * Fields keep their default values. + */ + LimitFields() { + } + + LimitFields(int batch, LimitScope sizeScope, long size) { + setFields(batch, sizeScope, size); + } + + void copy(LimitFields limitsToCopy) { + if (limitsToCopy != null) { + setFields(limitsToCopy.getBatch(), limitsToCopy.getSizeScope(), limitsToCopy.getSize()); + } + } + + /** + * Set all fields together. 
+ * @param batch + * @param sizeScope + * @param size + */ + void setFields(int batch, LimitScope sizeScope, long size) { + setBatch(batch); + setSizeScope(sizeScope); + setSize(size); + } + + int getBatch() { + return this.batch; + } + + void setBatch(int batch) { + this.batch = batch; + } + + /** + * @param checkerScope + * @return true when the limit can be enforced from the scope of the checker + */ + boolean canEnforceBatchLimitFromScope(LimitScope checkerScope) { + return LimitScope.BETWEEN_CELLS.canEnforceLimitFromScope(checkerScope); + } + + long getSize() { + return this.size; + } + + void setSize(long size) { + this.size = size; + } + + /** + * @return {@link LimitScope} indicating scope in which the size limit is enforced + */ + LimitScope getSizeScope() { + return this.sizeScope; + } + + /** + * Change the scope in which the size limit is enforced + */ + void setSizeScope(LimitScope scope) { + this.sizeScope = scope; + } + + /** + * @param checkerScope + * @return true when the limit can be enforced from the scope of the checker + */ + boolean canEnforceSizeLimitFromScope(LimitScope checkerScope) { + return this.sizeScope.canEnforceLimitFromScope(checkerScope); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("{"); + + sb.append("batch:"); + sb.append(batch); + + sb.append(", size:"); + sb.append(size); + + sb.append(", sizeScope:"); + sb.append(sizeScope); + + sb.append("}"); + return sb.toString(); + } + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java index 1dfe4adac4d..b1600c03130 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java @@ -42,9 +42,9 @@ class SplitRequest implements Runnable { private final HRegionServer server; private TableLock tableLock; - SplitRequest(HRegion region, byte[] midKey, HRegionServer hrs) { + SplitRequest(Region region, byte[] midKey, HRegionServer hrs) { Preconditions.checkNotNull(hrs); - this.parent = region; + this.parent = (HRegion)region; this.midKey = midKey; this.server = hrs; } @@ -64,12 +64,12 @@ class SplitRequest implements Runnable { boolean success = false; server.metricsRegionServer.incrSplitRequest(); long startTime = EnvironmentEdgeManager.currentTime(); - SplitTransaction st = new SplitTransaction(parent, midKey); + SplitTransactionImpl st = new SplitTransactionImpl(parent, midKey); try { //acquire a shared read lock on the table, so that table schema modifications //do not happen concurrently tableLock = server.getTableLockManager().readLock(parent.getTableDesc().getTableName() - , "SPLIT_REGION:" + parent.getRegionNameAsString()); + , "SPLIT_REGION:" + parent.getRegionInfo().getRegionNameAsString()); try { tableLock.acquire(); } catch (IOException ex) { @@ -87,22 +87,22 @@ class SplitRequest implements Runnable { if (this.server.isStopping() || this.server.isStopped()) { LOG.info( "Skip rollback/cleanup of failed split of " - + parent.getRegionNameAsString() + " because server is" + + parent.getRegionInfo().getRegionNameAsString() + " because server is" + (this.server.isStopping() ? 
" stopping" : " stopped"), e); return; } try { LOG.info("Running rollback/cleanup of failed split of " + - parent.getRegionNameAsString() + "; " + e.getMessage(), e); + parent.getRegionInfo().getRegionNameAsString() + "; " + e.getMessage(), e); if (st.rollback(this.server, this.server)) { LOG.info("Successful rollback of failed split of " + - parent.getRegionNameAsString()); + parent.getRegionInfo().getRegionNameAsString()); } else { this.server.abort("Abort; we got an error after point-of-no-return"); } } catch (RuntimeException ee) { String msg = "Failed rollback of failed split of " + - parent.getRegionNameAsString() + " -- aborting server"; + parent.getRegionInfo().getRegionNameAsString() + " -- aborting server"; // If failed rollback, kill this server to avoid having a hole in table. LOG.info(msg, ee); this.server.abort(msg + " -- Cause: " + ee.getMessage()); @@ -133,7 +133,7 @@ class SplitRequest implements Runnable { server.metricsRegionServer.incrSplitSuccess(); // Log success LOG.info("Region split, hbase:meta updated, and report to master. Parent=" - + parent.getRegionNameAsString() + ", new regions: " + + parent.getRegionInfo().getRegionNameAsString() + ", new regions: " + st.getFirstDaughter().getRegionNameAsString() + ", " + st.getSecondDaughter().getRegionNameAsString() + ". Split took " + StringUtils.formatTimeDiff(EnvironmentEdgeManager.currentTime(), startTime)); @@ -151,7 +151,7 @@ class SplitRequest implements Runnable { LOG.error("Could not release the table lock (something is really wrong). " + "Aborting this server to avoid holding the lock forever."); this.server.abort("Abort; we got an error when releasing the table lock " - + "on " + parent.getRegionNameAsString()); + + "on " + parent.getRegionInfo().getRegionNameAsString()); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java index dbcf0333a5d..a21c19d7b0e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,93 +18,62 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; -import java.io.InterruptedIOException; -import java.util.ArrayList; import java.util.List; -import java.util.ListIterator; -import java.util.Map; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.Server; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.CancelableProgressable; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.util.HasThread; -import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.util.PairOfSameType; -import org.apache.zookeeper.KeeperException; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; /** * Executes region split as a "transaction". Call {@link #prepare()} to setup * the transaction, {@link #execute(Server, RegionServerServices)} to run the * transaction and {@link #rollback(Server, RegionServerServices)} to cleanup if execute fails. * - *

      Here is an example of how you would use this class: + *

      Here is an example of how you would use this interface: *

      - *  SplitTransaction st = new SplitTransaction(this.conf, parent, midKey)
      + *  SplitTransactionFactory factory = new SplitTransactionFactory(conf);
      + *  SplitTransaction st = factory.create(parent, midKey)
      + *    .registerTransactionListener(new TransactionListener() {
      + *       public void transition(SplitTransaction transaction, SplitTransactionPhase from,
      + *           SplitTransactionPhase to) throws IOException {
      + *         // ...
      + *       }
      + *       public void rollback(SplitTransaction transaction, SplitTransactionPhase from,
      + *           SplitTransactionPhase to) {
      + *         // ...
      + *       }
      + *    });
        *  if (!st.prepare()) return;
        *  try {
        *    st.execute(server, services);
      - *  } catch (IOException ioe) {
      + *  } catch (IOException e) {
        *    try {
        *      st.rollback(server, services);
        *      return;
        *    } catch (RuntimeException e) {
      - *      myAbortable.abort("Failed split, abort");
      + *      // abort the server
        *    }
        *  }
        * 
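As a hedged expansion of the example above, a listener that simply logs each phase change could be wired up as follows. SplitTransactionFactory, conf, parent, midKey, server and services are taken from that example; the LOG field and the imports are assumptions for illustration.

    SplitTransaction st = new SplitTransactionFactory(conf)
        .create(parent, midKey)
        .registerTransactionListener(new TransactionListener() {
          @Override
          public void transition(SplitTransaction transaction, SplitTransactionPhase from,
              SplitTransactionPhase to) throws IOException {
            LOG.info("Split phase transition " + from + " -> " + to);
          }
          @Override
          public void rollback(SplitTransaction transaction, SplitTransactionPhase from,
              SplitTransactionPhase to) {
            LOG.info("Split rollback from " + from + " to " + to);
          }
        });
    if (!st.prepare()) return;
    try {
      st.execute(server, services);
    } catch (IOException e) {
      try {
        if (!st.rollback(server, services)) {
          // past the point of no return; a real caller would abort the regionserver here
        }
      } catch (IOException | RuntimeException rollbackFailed) {
        // rollback itself failed; abort the regionserver
      }
    }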
      - *

      This class is not thread safe. Caller needs ensure split is run by + *

      A split transaction is not thread safe. Callers must ensure a split is run by * one thread only. */ -@InterfaceAudience.Private -public class SplitTransaction { - private static final Log LOG = LogFactory.getLog(SplitTransaction.class); - - /* - * Region to split - */ - private final HRegion parent; - private HRegionInfo hri_a; - private HRegionInfo hri_b; - private long fileSplitTimeout = 30000; - - /* - * Row to split around - */ - private final byte [] splitrow; +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving +public interface SplitTransaction { /** - * Types to add to the transaction journal. - * Each enum is a step in the split transaction. Used to figure how much - * we need to rollback. + * Each enum is a step in the split transaction. */ - static enum JournalEntryType { + public enum SplitTransactionPhase { /** * Started */ STARTED, /** - * Prepared (after table lock) + * Prepared */ PREPARED, /** @@ -148,6 +116,12 @@ public class SplitTransaction { * Opened the second daughter region */ OPENED_REGION_B, + /** + * Point of no return. + * If we got here, then transaction is not recoverable other than by + * crashing out the regionserver. + */ + PONR, /** * Before postSplit coprocessor hook */ @@ -157,327 +131,60 @@ public class SplitTransaction { */ AFTER_POST_SPLIT_HOOK, /** - * Point of no return. - * If we got here, then transaction is not recoverable other than by - * crashing out the regionserver. + * Completed */ - PONR - } - - static class JournalEntry { - private JournalEntryType type; - private long timestamp; - - public JournalEntry(JournalEntryType type) { - this(type, EnvironmentEdgeManager.currentTime()); - } - - public JournalEntry(JournalEntryType type, long timestamp) { - this.type = type; - this.timestamp = timestamp; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(type); - sb.append(" at "); - sb.append(timestamp); - return sb.toString(); - } - } - - /* - * Journal of how far the split transaction has progressed. - */ - private final List journal = new ArrayList(); - - /** - * Constructor - * @param r Region to split - * @param splitrow Row to split around - */ - public SplitTransaction(final HRegion r, final byte [] splitrow) { - this.parent = r; - this.splitrow = splitrow; - this.journal.add(new JournalEntry(JournalEntryType.STARTED)); + COMPLETED } /** - * Does checks on split inputs. + * Split transaction journal entry + */ + public interface JournalEntry { + + /** @return the completed phase marked by this journal entry */ + SplitTransactionPhase getPhase(); + + /** @return the time of phase completion */ + long getTimeStamp(); + } + + /** + * Split transaction listener + */ + public interface TransactionListener { + + /** + * Invoked when transitioning forward from one transaction phase to another + * @param transaction the transaction + * @param from the current phase + * @param to the next phase + * @throws IOException listener can throw this to abort + */ + void transition(SplitTransaction transaction, SplitTransactionPhase from, + SplitTransactionPhase to) throws IOException; + + /** + * Invoked when rolling back a transaction from one transaction phase to the + * previous + * @param transaction the transaction + * @param from the current phase + * @param to the previous phase + */ + void rollback(SplitTransaction transaction, SplitTransactionPhase from, + SplitTransactionPhase to); + } + + /** + * Check split inputs and prepare the transaction. 
* @return true if the region is splittable else * false if it is not (e.g. its already closed, etc.). + * @throws IOException */ - public boolean prepare() { - if (!this.parent.isSplittable()) return false; - // Split key can be null if this region is unsplittable; i.e. has refs. - if (this.splitrow == null) return false; - HRegionInfo hri = this.parent.getRegionInfo(); - parent.prepareToSplit(); - // Check splitrow. - byte [] startKey = hri.getStartKey(); - byte [] endKey = hri.getEndKey(); - if (Bytes.equals(startKey, splitrow) || - !this.parent.getRegionInfo().containsRow(splitrow)) { - LOG.info("Split row is not inside region key range or is equal to " + - "startkey: " + Bytes.toStringBinary(this.splitrow)); - return false; - } - long rid = getDaughterRegionIdTimestamp(hri); - this.hri_a = new HRegionInfo(hri.getTable(), startKey, this.splitrow, false, rid); - this.hri_b = new HRegionInfo(hri.getTable(), this.splitrow, endKey, false, rid); - this.journal.add(new JournalEntry(JournalEntryType.PREPARED)); - return true; - } - - /** - * Calculate daughter regionid to use. - * @param hri Parent {@link HRegionInfo} - * @return Daughter region id (timestamp) to use. - */ - private static long getDaughterRegionIdTimestamp(final HRegionInfo hri) { - long rid = EnvironmentEdgeManager.currentTime(); - // Regionid is timestamp. Can't be less than that of parent else will insert - // at wrong location in hbase:meta (See HBASE-710). - if (rid < hri.getRegionId()) { - LOG.warn("Clock skew; parent regions id is " + hri.getRegionId() + - " but current time here is " + rid); - rid = hri.getRegionId() + 1; - } - return rid; - } - - private static IOException closedByOtherException = new IOException( - "Failed to close region: already closed by another thread"); - - /** - * Prepare the regions and region files. - * @param server Hosting server instance. Can be null when testing (won't try - * and update in zk if a null server) - * @param services Used to online/offline regions. - * @throws IOException If thrown, transaction failed. - * Call {@link #rollback(Server, RegionServerServices)} - * @return Regions created - */ - /* package */PairOfSameType createDaughters(final Server server, - final RegionServerServices services) throws IOException { - LOG.info("Starting split of region " + this.parent); - if ((server != null && server.isStopped()) || - (services != null && services.isStopping())) { - throw new IOException("Server is stopped or stopping"); - } - assert !this.parent.lock.writeLock().isHeldByCurrentThread(): - "Unsafe to hold write lock while performing RPCs"; - - journal.add(new JournalEntry(JournalEntryType.BEFORE_PRE_SPLIT_HOOK)); - - // Coprocessor callback - if (this.parent.getCoprocessorHost() != null) { - // TODO: Remove one of these - this.parent.getCoprocessorHost().preSplit(); - this.parent.getCoprocessorHost().preSplit(this.splitrow); - } - - journal.add(new JournalEntry(JournalEntryType.AFTER_PRE_SPLIT_HOOK)); - - // If true, no cluster to write meta edits to or to update znodes in. - boolean testing = server == null? true: - server.getConfiguration().getBoolean("hbase.testing.nocluster", false); - this.fileSplitTimeout = testing ? this.fileSplitTimeout : - server.getConfiguration().getLong("hbase.regionserver.fileSplitTimeout", - this.fileSplitTimeout); - - PairOfSameType daughterRegions = stepsBeforePONR(server, services, testing); - - List metaEntries = new ArrayList(); - if (this.parent.getCoprocessorHost() != null) { - if (this.parent.getCoprocessorHost(). 
- preSplitBeforePONR(this.splitrow, metaEntries)) { - throw new IOException("Coprocessor bypassing region " - + this.parent.getRegionNameAsString() + " split."); - } - try { - for (Mutation p : metaEntries) { - HRegionInfo.parseRegionName(p.getRow()); - } - } catch (IOException e) { - LOG.error("Row key of mutation from coprossor is not parsable as region name." - + "Mutations from coprocessor should only for hbase:meta table."); - throw e; - } - } - - // This is the point of no return. Adding subsequent edits to .META. as we - // do below when we do the daughter opens adding each to .META. can fail in - // various interesting ways the most interesting of which is a timeout - // BUT the edits all go through (See HBASE-3872). IF we reach the PONR - // then subsequent failures need to crash out this regionserver; the - // server shutdown processing should be able to fix-up the incomplete split. - // The offlined parent will have the daughters as extra columns. If - // we leave the daughter regions in place and do not remove them when we - // crash out, then they will have their references to the parent in place - // still and the server shutdown fixup of .META. will point to these - // regions. - // We should add PONR JournalEntry before offlineParentInMeta,so even if - // OfflineParentInMeta timeout,this will cause regionserver exit,and then - // master ServerShutdownHandler will fix daughter & avoid data loss. (See - // HBase-4562). - this.journal.add(new JournalEntry(JournalEntryType.PONR)); - - // Edit parent in meta. Offlines parent region and adds splita and splitb - // as an atomic update. See HBASE-7721. This update to META makes the region - // will determine whether the region is split or not in case of failures. - // If it is successful, master will roll-forward, if not, master will rollback - // and assign the parent region. - if (services != null && !services.reportRegionStateTransition(TransitionCode.SPLIT_PONR, - parent.getRegionInfo(), hri_a, hri_b)) { - // Passed PONR, let SSH clean it up - throw new IOException("Failed to notify master that split passed PONR: " - + parent.getRegionInfo().getRegionNameAsString()); - } - return daughterRegions; - } - - public PairOfSameType stepsBeforePONR(final Server server, - final RegionServerServices services, boolean testing) throws IOException { - if (services != null && !services.reportRegionStateTransition(TransitionCode.READY_TO_SPLIT, - parent.getRegionInfo(), hri_a, hri_b)) { - throw new IOException("Failed to get ok from master to split " - + parent.getRegionNameAsString()); - } - this.journal.add(new JournalEntry(JournalEntryType.SET_SPLITTING)); - - this.parent.getRegionFileSystem().createSplitsDir(); - this.journal.add(new JournalEntry(JournalEntryType.CREATE_SPLIT_DIR)); - - Map> hstoreFilesToSplit = null; - Exception exceptionToThrow = null; - try{ - hstoreFilesToSplit = this.parent.close(false); - } catch (Exception e) { - exceptionToThrow = e; - } - if (exceptionToThrow == null && hstoreFilesToSplit == null) { - // The region was closed by a concurrent thread. We can't continue - // with the split, instead we must just abandon the split. If we - // reopen or split this could cause problems because the region has - // probably already been moved to a different server, or is in the - // process of moving to a different server. 
- exceptionToThrow = closedByOtherException; - } - if (exceptionToThrow != closedByOtherException) { - this.journal.add(new JournalEntry(JournalEntryType.CLOSED_PARENT_REGION)); - } - if (exceptionToThrow != null) { - if (exceptionToThrow instanceof IOException) throw (IOException)exceptionToThrow; - throw new IOException(exceptionToThrow); - } - if (!testing) { - services.removeFromOnlineRegions(this.parent, null); - } - this.journal.add(new JournalEntry(JournalEntryType.OFFLINED_PARENT)); - - // TODO: If splitStoreFiles were multithreaded would we complete steps in - // less elapsed time? St.Ack 20100920 - // - // splitStoreFiles creates daughter region dirs under the parent splits dir - // Nothing to unroll here if failure -- clean up of CREATE_SPLIT_DIR will - // clean this up. - Pair expectedReferences = splitStoreFiles(hstoreFilesToSplit); - - // Log to the journal that we are creating region A, the first daughter - // region. We could fail halfway through. If we do, we could have left - // stuff in fs that needs cleanup -- a storefile or two. Thats why we - // add entry to journal BEFORE rather than AFTER the change. - this.journal.add(new JournalEntry(JournalEntryType.STARTED_REGION_A_CREATION)); - assertReferenceFileCount(expectedReferences.getFirst(), - this.parent.getRegionFileSystem().getSplitsDir(this.hri_a)); - HRegion a = this.parent.createDaughterRegionFromSplits(this.hri_a); - assertReferenceFileCount(expectedReferences.getFirst(), - new Path(this.parent.getRegionFileSystem().getTableDir(), this.hri_a.getEncodedName())); - - // Ditto - this.journal.add(new JournalEntry(JournalEntryType.STARTED_REGION_B_CREATION)); - assertReferenceFileCount(expectedReferences.getSecond(), - this.parent.getRegionFileSystem().getSplitsDir(this.hri_b)); - HRegion b = this.parent.createDaughterRegionFromSplits(this.hri_b); - assertReferenceFileCount(expectedReferences.getSecond(), - new Path(this.parent.getRegionFileSystem().getTableDir(), this.hri_b.getEncodedName())); - - return new PairOfSameType(a, b); - } - - void assertReferenceFileCount(int expectedReferenceFileCount, Path dir) - throws IOException { - if (expectedReferenceFileCount != 0 && - expectedReferenceFileCount != FSUtils.getRegionReferenceFileCount(this.parent.getFilesystem(), dir)) { - throw new IOException("Failing split. Expected reference file count isn't equal."); - } - } - - /** - * Perform time consuming opening of the daughter regions. - * @param server Hosting server instance. Can be null when testing - * @param services Used to online/offline regions. - * @param a first daughter region - * @param a second daughter region - * @throws IOException If thrown, transaction failed. - * Call {@link #rollback(Server, RegionServerServices)} - */ - /* package */void openDaughters(final Server server, - final RegionServerServices services, HRegion a, HRegion b) - throws IOException { - boolean stopped = server != null && server.isStopped(); - boolean stopping = services != null && services.isStopping(); - // TODO: Is this check needed here? - if (stopped || stopping) { - LOG.info("Not opening daughters " + - b.getRegionInfo().getRegionNameAsString() + - " and " + - a.getRegionInfo().getRegionNameAsString() + - " because stopping=" + stopping + ", stopped=" + stopped); - } else { - // Open daughters in parallel. 
- DaughterOpener aOpener = new DaughterOpener(server, a); - DaughterOpener bOpener = new DaughterOpener(server, b); - aOpener.start(); - bOpener.start(); - try { - aOpener.join(); - if (aOpener.getException() == null) { - journal.add(new JournalEntry(JournalEntryType.OPENED_REGION_A)); - } - bOpener.join(); - if (bOpener.getException() == null) { - journal.add(new JournalEntry(JournalEntryType.OPENED_REGION_B)); - } - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - if (aOpener.getException() != null) { - throw new IOException("Failed " + - aOpener.getName(), aOpener.getException()); - } - if (bOpener.getException() != null) { - throw new IOException("Failed " + - bOpener.getName(), bOpener.getException()); - } - if (services != null) { - if (!services.reportRegionStateTransition(TransitionCode.SPLIT, - parent.getRegionInfo(), hri_a, hri_b)) { - throw new IOException("Failed to report split region to master: " - + parent.getRegionInfo().getShortNameToLog()); - } - // Should add it to OnlineRegions - services.addToOnlineRegions(b); - services.addToOnlineRegions(a); - } - } - } + boolean prepare() throws IOException; /** * Run the transaction. - * @param server Hosting server instance. Can be null when testing + * @param server Hosting server instance. Can be null when testing. * @param services Used to online/offline regions. * @throws IOException If thrown, transaction failed. * Call {@link #rollback(Server, RegionServerServices)} @@ -485,325 +192,44 @@ public class SplitTransaction { * @throws IOException * @see #rollback(Server, RegionServerServices) */ - public PairOfSameType execute(final Server server, - final RegionServerServices services) - throws IOException { - PairOfSameType regions = createDaughters(server, services); - if (this.parent.getCoprocessorHost() != null) { - this.parent.getCoprocessorHost().preSplitAfterPONR(); - } - return stepsAfterPONR(server, services, regions); - } - - public PairOfSameType stepsAfterPONR(final Server server, - final RegionServerServices services, PairOfSameType regions) - throws IOException { - openDaughters(server, services, regions.getFirst(), regions.getSecond()); - journal.add(new JournalEntry(JournalEntryType.BEFORE_POST_SPLIT_HOOK)); - // Coprocessor callback - if (parent.getCoprocessorHost() != null) { - parent.getCoprocessorHost().postSplit(regions.getFirst(), regions.getSecond()); - } - journal.add(new JournalEntry(JournalEntryType.AFTER_POST_SPLIT_HOOK)); - return regions; - } - - public Put addLocation(final Put p, final ServerName sn, long openSeqNum) { - p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, - Bytes.toBytes(sn.getHostAndPort())); - p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, - Bytes.toBytes(sn.getStartcode())); - p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER, - Bytes.toBytes(openSeqNum)); - return p; - } - - /* - * Open daughter region in its own thread. - * If we fail, abort this hosting server. - */ - class DaughterOpener extends HasThread { - private final Server server; - private final HRegion r; - private Throwable t = null; - - DaughterOpener(final Server s, final HRegion r) { - super((s == null? "null-services": s.getServerName()) + - "-daughterOpener=" + r.getRegionInfo().getEncodedName()); - setDaemon(true); - this.server = s; - this.r = r; - } - - /** - * @return Null if open succeeded else exception that causes us fail open. 
- * Call it after this thread exits else you may get wrong view on result. - */ - Throwable getException() { - return this.t; - } - - @Override - public void run() { - try { - openDaughterRegion(this.server, r); - } catch (Throwable t) { - this.t = t; - } - } - } - - /** - * Open daughter regions, add them to online list and update meta. - * @param server - * @param daughter - * @throws IOException - * @throws KeeperException - */ - void openDaughterRegion(final Server server, final HRegion daughter) - throws IOException, KeeperException { - HRegionInfo hri = daughter.getRegionInfo(); - LoggingProgressable reporter = server == null ? null - : new LoggingProgressable(hri, server.getConfiguration().getLong( - "hbase.regionserver.split.daughter.open.log.interval", 10000)); - daughter.openHRegion(reporter); - } - - static class LoggingProgressable implements CancelableProgressable { - private final HRegionInfo hri; - private long lastLog = -1; - private final long interval; - - LoggingProgressable(final HRegionInfo hri, final long interval) { - this.hri = hri; - this.interval = interval; - } - - @Override - public boolean progress() { - long now = EnvironmentEdgeManager.currentTime(); - if (now - lastLog > this.interval) { - LOG.info("Opening " + this.hri.getRegionNameAsString()); - this.lastLog = now; - } - return true; - } - } - - /** - * Creates reference files for top and bottom half of the - * @param hstoreFilesToSplit map of store files to create half file references for. - * @return the number of reference files that were created. - * @throws IOException - */ - private Pair splitStoreFiles( - final Map> hstoreFilesToSplit) - throws IOException { - if (hstoreFilesToSplit == null) { - // Could be null because close didn't succeed -- for now consider it fatal - throw new IOException("Close returned empty list of StoreFiles"); - } - // The following code sets up a thread pool executor with as many slots as - // there's files to split. It then fires up everything, waits for - // completion and finally checks for any exception - int nbFiles = hstoreFilesToSplit.size(); - if (nbFiles == 0) { - // no file needs to be splitted. - return new Pair(0,0); - } - LOG.info("Preparing to split " + nbFiles + " storefiles for region " + this.parent); - ThreadFactoryBuilder builder = new ThreadFactoryBuilder(); - builder.setNameFormat("StoreFileSplitter-%1$d"); - ThreadFactory factory = builder.build(); - ThreadPoolExecutor threadPool = - (ThreadPoolExecutor) Executors.newFixedThreadPool(nbFiles, factory); - List>> futures = new ArrayList>> (nbFiles); - - // Split each store file. - for (Map.Entry> entry: hstoreFilesToSplit.entrySet()) { - for (StoreFile sf: entry.getValue()) { - StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf); - futures.add(threadPool.submit(sfs)); - } - } - // Shutdown the pool - threadPool.shutdown(); - - // Wait for all the tasks to finish - try { - boolean stillRunning = !threadPool.awaitTermination( - this.fileSplitTimeout, TimeUnit.MILLISECONDS); - if (stillRunning) { - threadPool.shutdownNow(); - // wait for the thread to shutdown completely. 
- while (!threadPool.isTerminated()) { - Thread.sleep(50); - } - throw new IOException("Took too long to split the" + - " files and create the references, aborting split"); - } - } catch (InterruptedException e) { - throw (InterruptedIOException)new InterruptedIOException().initCause(e); - } - - int created_a = 0; - int created_b = 0; - // Look for any exception - for (Future> future : futures) { - try { - Pair p = future.get(); - created_a += p.getFirst() != null ? 1 : 0; - created_b += p.getSecond() != null ? 1 : 0; - } catch (InterruptedException e) { - throw (InterruptedIOException) new InterruptedIOException().initCause(e); - } catch (ExecutionException e) { - throw new IOException(e); - } - } - - if (LOG.isDebugEnabled()) { - LOG.debug("Split storefiles for region " + this.parent + " Daugther A: " + created_a - + " storefiles, Daugther B: " + created_b + " storefiles."); - } - return new Pair(created_a, created_b); - } - - private Pair splitStoreFile(final byte[] family, final StoreFile sf) throws IOException { - HRegionFileSystem fs = this.parent.getRegionFileSystem(); - String familyName = Bytes.toString(family); - Path path_a = - fs.splitStoreFile(this.hri_a, familyName, sf, this.splitrow, false, - this.parent.getSplitPolicy()); - Path path_b = - fs.splitStoreFile(this.hri_b, familyName, sf, this.splitrow, true, - this.parent.getSplitPolicy()); - return new Pair(path_a, path_b); - } - - /** - * Utility class used to do the file splitting / reference writing - * in parallel instead of sequentially. - */ - class StoreFileSplitter implements Callable> { - private final byte[] family; - private final StoreFile sf; - - /** - * Constructor that takes what it needs to split - * @param family Family that contains the store file - * @param sf which file - */ - public StoreFileSplitter(final byte[] family, final StoreFile sf) { - this.sf = sf; - this.family = family; - } - - public Pair call() throws IOException { - return splitStoreFile(family, sf); - } - } + PairOfSameType execute(Server server, RegionServerServices services) throws IOException; /** + * Roll back a failed transaction * @param server Hosting server instance (May be null when testing). * @param services * @throws IOException If thrown, rollback failed. Take drastic action. * @return True if we successfully rolled back, false if we got to the point * of no return and so now need to abort the server to minimize damage. */ - @SuppressWarnings("deprecation") - public boolean rollback(final Server server, final RegionServerServices services) - throws IOException { - // Coprocessor callback - if (this.parent.getCoprocessorHost() != null) { - this.parent.getCoprocessorHost().preRollBackSplit(); - } + boolean rollback(Server server, RegionServerServices services) throws IOException; - boolean result = true; - ListIterator iterator = - this.journal.listIterator(this.journal.size()); - // Iterate in reverse. - while (iterator.hasPrevious()) { - JournalEntry je = iterator.previous(); - switch(je.type) { + /** + * Register a listener for transaction preparation, execution, and possibly + * rollback phases. + *
      A listener can abort a transaction by throwing an exception. + * @param listener the listener + * @return 'this' for chaining + */ + SplitTransaction registerTransactionListener(TransactionListener listener); - case SET_SPLITTING: - if (services != null - && !services.reportRegionStateTransition(TransitionCode.SPLIT_REVERTED, - parent.getRegionInfo(), hri_a, hri_b)) { - return false; - } - break; + /** + * Get the journal for the transaction. + *
      Journal entries are an opaque type represented as JournalEntry. They can + * also provide useful debugging information via their toString method. + * @return the transaction journal + */ + List getJournal(); - case CREATE_SPLIT_DIR: - this.parent.writestate.writesEnabled = true; - this.parent.getRegionFileSystem().cleanupSplitsDir(); - break; + /** + * Get the Server running the transaction or rollback + * @return server instance + */ + Server getServer(); - case CLOSED_PARENT_REGION: - try { - // So, this returns a seqid but if we just closed and then reopened, we - // should be ok. On close, we flushed using sequenceid obtained from - // hosting regionserver so no need to propagate the sequenceid returned - // out of initialize below up into regionserver as we normally do. - // TODO: Verify. - this.parent.initialize(); - } catch (IOException e) { - LOG.error("Failed rollbacking CLOSED_PARENT_REGION of region " + - this.parent.getRegionNameAsString(), e); - throw new RuntimeException(e); - } - break; - - case STARTED_REGION_A_CREATION: - this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_a); - break; - - case STARTED_REGION_B_CREATION: - this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_b); - break; - - case OFFLINED_PARENT: - if (services != null) services.addToOnlineRegions(this.parent); - break; - - case PONR: - // We got to the point-of-no-return so we need to just abort. Return - // immediately. Do not clean up created daughter regions. They need - // to be in place so we don't delete the parent region mistakenly. - // See HBASE-3872. - return false; - - // Informational only cases - case STARTED: - case PREPARED: - case BEFORE_PRE_SPLIT_HOOK: - case AFTER_PRE_SPLIT_HOOK: - case BEFORE_POST_SPLIT_HOOK: - case AFTER_POST_SPLIT_HOOK: - case OPENED_REGION_A: - case OPENED_REGION_B: - break; - - default: - throw new RuntimeException("Unhandled journal entry: " + je); - } - } - // Coprocessor callback - if (this.parent.getCoprocessorHost() != null) { - this.parent.getCoprocessorHost().postRollBackSplit(); - } - return result; - } - - HRegionInfo getFirstDaughter() { - return hri_a; - } - - HRegionInfo getSecondDaughter() { - return hri_b; - } - - List getJournal() { - return journal; - } + /** + * Get the RegonServerServices of the server running the transaction or rollback + * @return region server services + */ + RegionServerServices getRegionServerServices(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionFactory.java new file mode 100644 index 00000000000..7df8233f96a --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionFactory.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.ReflectionUtils; + +/** + * A factory for creating SplitTransactions, which execute region split as a "transaction". + * See {@link SplitTransaction} + */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) +@InterfaceStability.Evolving +public class SplitTransactionFactory implements Configurable { + + public static final String SPLIT_TRANSACTION_IMPL_KEY = + "hbase.regionserver.split.transaction.impl"; + + private Configuration conf; + + public SplitTransactionFactory(Configuration conf) { + this.conf = conf; + } + + @Override + public Configuration getConf() { + return conf; + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + } + + /** + * Create a split transaction + * @param r the region to split + * @param splitrow the split point in the keyspace + * @return transaction instance + */ + public SplitTransaction create(final Region r, final byte [] splitrow) { + return ReflectionUtils.instantiateWithCustomCtor( + // The implementation class must extend SplitTransactionImpl, not only + // implement the SplitTransaction interface like you might expect, + // because various places such as AssignmentManager use static methods + // from SplitTransactionImpl. Whatever we use for implementation must + // be compatible, so it's safest to require ? extends SplitTransactionImpl. + // If not compatible we will throw a runtime exception from here. + conf.getClass(SPLIT_TRANSACTION_IMPL_KEY, SplitTransactionImpl.class, + SplitTransactionImpl.class).getName(), + new Class[] { Region.class, byte[].class }, + new Object[] { r, splitrow }); + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java new file mode 100644 index 00000000000..8695c774aea --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java @@ -0,0 +1,789 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.ArrayList; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CancelableProgressable; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.HasThread; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.PairOfSameType; +import org.apache.zookeeper.KeeperException; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.util.concurrent.ThreadFactoryBuilder; + +@InterfaceAudience.Private +public class SplitTransactionImpl implements SplitTransaction { + private static final Log LOG = LogFactory.getLog(SplitTransactionImpl.class); + + /* + * Region to split + */ + private final HRegion parent; + private HRegionInfo hri_a; + private HRegionInfo hri_b; + private long fileSplitTimeout = 30000; + + /* + * Row to split around + */ + private final byte [] splitrow; + + /* + * Transaction state for listener, only valid during execute and + * rollback + */ + private SplitTransactionPhase currentPhase = SplitTransactionPhase.STARTED; + private Server server; + private RegionServerServices rsServices; + + public static class JournalEntryImpl implements JournalEntry { + private SplitTransactionPhase type; + private long timestamp; + + public JournalEntryImpl(SplitTransactionPhase type) { + this(type, EnvironmentEdgeManager.currentTime()); + } + + public JournalEntryImpl(SplitTransactionPhase type, long timestamp) { + this.type = type; + this.timestamp = timestamp; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(type); + sb.append(" at "); + sb.append(timestamp); + return sb.toString(); + } + + @Override + public SplitTransactionPhase getPhase() { + return type; + } + + @Override + public long getTimeStamp() { + return timestamp; + } + } + + /* + * Journal of how far the split transaction has progressed. 
+ */ + private final ArrayList journal = new ArrayList(); + + /** + * Listeners + */ + private final ArrayList listeners = new ArrayList(); + + /** + * Constructor + * @param r Region to split + * @param splitrow Row to split around + */ + public SplitTransactionImpl(final Region r, final byte [] splitrow) { + this.parent = (HRegion)r; + this.splitrow = splitrow; + this.journal.add(new JournalEntryImpl(SplitTransactionPhase.STARTED)); + } + + private void transition(SplitTransactionPhase nextPhase) throws IOException { + transition(nextPhase, false); + } + + private void transition(SplitTransactionPhase nextPhase, boolean isRollback) + throws IOException { + if (!isRollback) { + // Add to the journal first, because if the listener throws an exception + // we need to roll back starting at 'nextPhase' + this.journal.add(new JournalEntryImpl(nextPhase)); + } + for (int i = 0; i < listeners.size(); i++) { + TransactionListener listener = listeners.get(i); + if (!isRollback) { + listener.transition(this, currentPhase, nextPhase); + } else { + listener.rollback(this, currentPhase, nextPhase); + } + } + currentPhase = nextPhase; + } + + @Override + public boolean prepare() throws IOException { + if (!this.parent.isSplittable()) return false; + // Split key can be null if this region is unsplittable; i.e. has refs. + if (this.splitrow == null) return false; + HRegionInfo hri = this.parent.getRegionInfo(); + parent.prepareToSplit(); + // Check splitrow. + byte [] startKey = hri.getStartKey(); + byte [] endKey = hri.getEndKey(); + if (Bytes.equals(startKey, splitrow) || + !this.parent.getRegionInfo().containsRow(splitrow)) { + LOG.info("Split row is not inside region key range or is equal to " + + "startkey: " + Bytes.toStringBinary(this.splitrow)); + return false; + } + long rid = getDaughterRegionIdTimestamp(hri); + this.hri_a = new HRegionInfo(hri.getTable(), startKey, this.splitrow, false, rid); + this.hri_b = new HRegionInfo(hri.getTable(), this.splitrow, endKey, false, rid); + + transition(SplitTransactionPhase.PREPARED); + + return true; + } + + /** + * Calculate daughter regionid to use. + * @param hri Parent {@link HRegionInfo} + * @return Daughter region id (timestamp) to use. + */ + private static long getDaughterRegionIdTimestamp(final HRegionInfo hri) { + long rid = EnvironmentEdgeManager.currentTime(); + // Regionid is timestamp. Can't be less than that of parent else will insert + // at wrong location in hbase:meta (See HBASE-710). + if (rid < hri.getRegionId()) { + LOG.warn("Clock skew; parent regions id is " + hri.getRegionId() + + " but current time here is " + rid); + rid = hri.getRegionId() + 1; + } + return rid; + } + + private static IOException closedByOtherException = new IOException( + "Failed to close region: already closed by another thread"); + + /** + * Prepare the regions and region files. + * @param server Hosting server instance. Can be null when testing (won't try + * and update in zk if a null server) + * @param services Used to online/offline regions. + * @throws IOException If thrown, transaction failed. 
+ * Call {@link #rollback(Server, RegionServerServices)} + * @return Regions created + */ + @VisibleForTesting + PairOfSameType createDaughters(final Server server, + final RegionServerServices services) throws IOException { + LOG.info("Starting split of region " + this.parent); + if ((server != null && server.isStopped()) || + (services != null && services.isStopping())) { + throw new IOException("Server is stopped or stopping"); + } + assert !this.parent.lock.writeLock().isHeldByCurrentThread(): + "Unsafe to hold write lock while performing RPCs"; + + transition(SplitTransactionPhase.BEFORE_PRE_SPLIT_HOOK); + + // Coprocessor callback + if (this.parent.getCoprocessorHost() != null) { + // TODO: Remove one of these + this.parent.getCoprocessorHost().preSplit(); + this.parent.getCoprocessorHost().preSplit(this.splitrow); + } + + transition(SplitTransactionPhase.AFTER_PRE_SPLIT_HOOK); + + // If true, no cluster to write meta edits to or to update znodes in. + boolean testing = server == null? true: + server.getConfiguration().getBoolean("hbase.testing.nocluster", false); + this.fileSplitTimeout = testing ? this.fileSplitTimeout : + server.getConfiguration().getLong("hbase.regionserver.fileSplitTimeout", + this.fileSplitTimeout); + + PairOfSameType daughterRegions = stepsBeforePONR(server, services, testing); + + List metaEntries = new ArrayList(); + if (this.parent.getCoprocessorHost() != null) { + if (this.parent.getCoprocessorHost(). + preSplitBeforePONR(this.splitrow, metaEntries)) { + throw new IOException("Coprocessor bypassing region " + + parent.getRegionInfo().getRegionNameAsString() + " split."); + } + try { + for (Mutation p : metaEntries) { + HRegionInfo.parseRegionName(p.getRow()); + } + } catch (IOException e) { + LOG.error("Row key of mutation from coprossor is not parsable as region name." + + "Mutations from coprocessor should only for hbase:meta table."); + throw e; + } + } + + // This is the point of no return. Adding subsequent edits to .META. as we + // do below when we do the daughter opens adding each to .META. can fail in + // various interesting ways the most interesting of which is a timeout + // BUT the edits all go through (See HBASE-3872). IF we reach the PONR + // then subsequent failures need to crash out this regionserver; the + // server shutdown processing should be able to fix-up the incomplete split. + // The offlined parent will have the daughters as extra columns. If + // we leave the daughter regions in place and do not remove them when we + // crash out, then they will have their references to the parent in place + // still and the server shutdown fixup of .META. will point to these + // regions. + // We should add PONR JournalEntry before offlineParentInMeta,so even if + // OfflineParentInMeta timeout,this will cause regionserver exit,and then + // master ServerShutdownHandler will fix daughter & avoid data loss. (See + // HBase-4562). + + transition(SplitTransactionPhase.PONR); + + // Edit parent in meta. Offlines parent region and adds splita and splitb + // as an atomic update. See HBASE-7721. This update to META makes the region + // will determine whether the region is split or not in case of failures. + // If it is successful, master will roll-forward, if not, master will rollback + // and assign the parent region. 
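As an aside, the transition() calls threaded through this method are what feed the listener API added on the SplitTransaction interface above. A minimal observer might look like the following sketch; the class name is made up, and it assumes TransactionListener and SplitTransactionPhase are the nested types declared on the SplitTransaction interface earlier in this patch.

  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;
  import org.apache.hadoop.hbase.regionserver.SplitTransaction;
  import org.apache.hadoop.hbase.regionserver.SplitTransaction.SplitTransactionPhase;
  import org.apache.hadoop.hbase.regionserver.SplitTransaction.TransactionListener;

  // Sketch only: observes every phase change of a split, including PONR.
  public class LoggingSplitListener implements TransactionListener {
    private static final Log LOG = LogFactory.getLog(LoggingSplitListener.class);

    @Override
    public void transition(SplitTransaction transaction,
        SplitTransactionPhase from, SplitTransactionPhase to) {
      // A listener may abort the transaction by throwing here; this one only logs.
      LOG.info("Split phase change " + from + " -> " + to);
    }

    @Override
    public void rollback(SplitTransaction transaction,
        SplitTransactionPhase from, SplitTransactionPhase to) {
      LOG.info("Split rollback " + from + " -> " + to);
    }
  }

Registering such a listener is then a one-liner, e.g. st.registerTransactionListener(new LoggingSplitListener()), using the chaining method declared on the interface.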
+ if (services != null && !services.reportRegionStateTransition(TransitionCode.SPLIT_PONR, + parent.getRegionInfo(), hri_a, hri_b)) { + // Passed PONR, let SSH clean it up + throw new IOException("Failed to notify master that split passed PONR: " + + parent.getRegionInfo().getRegionNameAsString()); + } + return daughterRegions; + } + + @VisibleForTesting + Put addLocation(final Put p, final ServerName sn, long openSeqNum) { + p.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes + .toBytes(sn.getHostAndPort())); + p.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn + .getStartcode())); + p.add(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER, Bytes.toBytes(openSeqNum)); + return p; + } + + @VisibleForTesting + public PairOfSameType stepsBeforePONR(final Server server, + final RegionServerServices services, boolean testing) throws IOException { + if (services != null && !services.reportRegionStateTransition(TransitionCode.READY_TO_SPLIT, + parent.getRegionInfo(), hri_a, hri_b)) { + throw new IOException("Failed to get ok from master to split " + + parent.getRegionInfo().getRegionNameAsString()); + } + + transition(SplitTransactionPhase.SET_SPLITTING); + + this.parent.getRegionFileSystem().createSplitsDir(); + + transition(SplitTransactionPhase.CREATE_SPLIT_DIR); + + Map> hstoreFilesToSplit = null; + Exception exceptionToThrow = null; + try{ + hstoreFilesToSplit = this.parent.close(false); + } catch (Exception e) { + exceptionToThrow = e; + } + if (exceptionToThrow == null && hstoreFilesToSplit == null) { + // The region was closed by a concurrent thread. We can't continue + // with the split, instead we must just abandon the split. If we + // reopen or split this could cause problems because the region has + // probably already been moved to a different server, or is in the + // process of moving to a different server. + exceptionToThrow = closedByOtherException; + } + if (exceptionToThrow != closedByOtherException) { + transition(SplitTransactionPhase.CLOSED_PARENT_REGION); + } + if (exceptionToThrow != null) { + if (exceptionToThrow instanceof IOException) throw (IOException)exceptionToThrow; + throw new IOException(exceptionToThrow); + } + if (!testing) { + services.removeFromOnlineRegions(this.parent, null); + } + + transition(SplitTransactionPhase.OFFLINED_PARENT); + + // TODO: If splitStoreFiles were multithreaded would we complete steps in + // less elapsed time? St.Ack 20100920 + // + // splitStoreFiles creates daughter region dirs under the parent splits dir + // Nothing to unroll here if failure -- clean up of CREATE_SPLIT_DIR will + // clean this up. + Pair expectedReferences = splitStoreFiles(hstoreFilesToSplit); + + // Log to the journal that we are creating region A, the first daughter + // region. We could fail halfway through. If we do, we could have left + // stuff in fs that needs cleanup -- a storefile or two. Thats why we + // add entry to journal BEFORE rather than AFTER the change. 
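The entries journaled here are the same ones exposed through getJournal() on the interface, so a failed split can be examined after the fact. A rough sketch of such a diagnostic helper follows; the method itself is hypothetical, and it assumes getJournal() returns the List of JournalEntry objects shown above, with JournalEntry nested on the SplitTransaction interface.

  // Sketch: log how far a failed split progressed before it was rolled back.
  static void dumpSplitJournal(SplitTransaction st, Log log) {
    for (SplitTransaction.JournalEntry entry : st.getJournal()) {
      // JournalEntryImpl.toString() renders the same "<phase> at <timestamp>" information.
      log.warn("Split journal: phase=" + entry.getPhase() + " at " + entry.getTimeStamp());
    }
  }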
+ + transition(SplitTransactionPhase.STARTED_REGION_A_CREATION); + + assertReferenceFileCount(expectedReferences.getFirst(), + this.parent.getRegionFileSystem().getSplitsDir(this.hri_a)); + HRegion a = this.parent.createDaughterRegionFromSplits(this.hri_a); + assertReferenceFileCount(expectedReferences.getFirst(), + new Path(this.parent.getRegionFileSystem().getTableDir(), this.hri_a.getEncodedName())); + + // Ditto + + transition(SplitTransactionPhase.STARTED_REGION_B_CREATION); + + assertReferenceFileCount(expectedReferences.getSecond(), + this.parent.getRegionFileSystem().getSplitsDir(this.hri_b)); + HRegion b = this.parent.createDaughterRegionFromSplits(this.hri_b); + assertReferenceFileCount(expectedReferences.getSecond(), + new Path(this.parent.getRegionFileSystem().getTableDir(), this.hri_b.getEncodedName())); + + return new PairOfSameType(a, b); + } + + @VisibleForTesting + void assertReferenceFileCount(int expectedReferenceFileCount, Path dir) + throws IOException { + if (expectedReferenceFileCount != 0 && + expectedReferenceFileCount != FSUtils.getRegionReferenceFileCount(parent.getFilesystem(), + dir)) { + throw new IOException("Failing split. Expected reference file count isn't equal."); + } + } + + /** + * Perform time consuming opening of the daughter regions. + * @param server Hosting server instance. Can be null when testing + * @param services Used to online/offline regions. + * @param a first daughter region + * @param a second daughter region + * @throws IOException If thrown, transaction failed. + * Call {@link #rollback(Server, RegionServerServices)} + */ + @VisibleForTesting + void openDaughters(final Server server, final RegionServerServices services, Region a, + Region b) throws IOException { + boolean stopped = server != null && server.isStopped(); + boolean stopping = services != null && services.isStopping(); + // TODO: Is this check needed here? + if (stopped || stopping) { + LOG.info("Not opening daughters " + + b.getRegionInfo().getRegionNameAsString() + + " and " + + a.getRegionInfo().getRegionNameAsString() + + " because stopping=" + stopping + ", stopped=" + stopped); + } else { + // Open daughters in parallel. 
+ DaughterOpener aOpener = new DaughterOpener(server, a); + DaughterOpener bOpener = new DaughterOpener(server, b); + aOpener.start(); + bOpener.start(); + try { + aOpener.join(); + if (aOpener.getException() == null) { + transition(SplitTransactionPhase.OPENED_REGION_A); + } + bOpener.join(); + if (bOpener.getException() == null) { + transition(SplitTransactionPhase.OPENED_REGION_B); + } + } catch (InterruptedException e) { + throw (InterruptedIOException)new InterruptedIOException().initCause(e); + } + if (aOpener.getException() != null) { + throw new IOException("Failed " + + aOpener.getName(), aOpener.getException()); + } + if (bOpener.getException() != null) { + throw new IOException("Failed " + + bOpener.getName(), bOpener.getException()); + } + if (services != null) { + if (!services.reportRegionStateTransition(TransitionCode.SPLIT, + parent.getRegionInfo(), hri_a, hri_b)) { + throw new IOException("Failed to report split region to master: " + + parent.getRegionInfo().getShortNameToLog()); + } + // Should add it to OnlineRegions + services.addToOnlineRegions(b); + services.addToOnlineRegions(a); + } + } + } + + @Override + public PairOfSameType execute(final Server server, final RegionServerServices services) + throws IOException { + this.server = server; + this.rsServices = services; + PairOfSameType regions = createDaughters(server, services); + stepsAfterPONR(server, services, regions); + transition(SplitTransactionPhase.COMPLETED); + return regions; + } + + @VisibleForTesting + void stepsAfterPONR(final Server server, + final RegionServerServices services, PairOfSameType regions) + throws IOException { + if (this.parent.getCoprocessorHost() != null) { + this.parent.getCoprocessorHost().preSplitAfterPONR(); + } + + openDaughters(server, services, regions.getFirst(), regions.getSecond()); + + transition(SplitTransactionPhase.BEFORE_POST_SPLIT_HOOK); + + // Coprocessor callback + if (parent.getCoprocessorHost() != null) { + parent.getCoprocessorHost().postSplit(regions.getFirst(), regions.getSecond()); + } + + transition(SplitTransactionPhase.AFTER_POST_SPLIT_HOOK); + } + + /* + * Open daughter region in its own thread. + * If we fail, abort this hosting server. + */ + private class DaughterOpener extends HasThread { + private final Server server; + private final Region r; + private Throwable t = null; + + DaughterOpener(final Server s, final Region r) { + super((s == null? "null-services": s.getServerName()) + + "-daughterOpener=" + r.getRegionInfo().getEncodedName()); + setDaemon(true); + this.server = s; + this.r = r; + } + + /** + * @return Null if open succeeded else exception that causes us fail open. + * Call it after this thread exits else you may get wrong view on result. + */ + Throwable getException() { + return this.t; + } + + @Override + public void run() { + try { + openDaughterRegion(this.server, r); + } catch (Throwable t) { + this.t = t; + } + } + } + + /** + * Open daughter regions, add them to online list and update meta. + * @param server + * @param daughter + * @throws IOException + * @throws KeeperException + */ + @VisibleForTesting + void openDaughterRegion(final Server server, final Region daughter) + throws IOException, KeeperException { + HRegionInfo hri = daughter.getRegionInfo(); + LoggingProgressable reporter = server == null ? 
null + : new LoggingProgressable(hri, server.getConfiguration().getLong( + "hbase.regionserver.split.daughter.open.log.interval", 10000)); + ((HRegion)daughter).openHRegion(reporter); + } + + static class LoggingProgressable implements CancelableProgressable { + private final HRegionInfo hri; + private long lastLog = -1; + private final long interval; + + LoggingProgressable(final HRegionInfo hri, final long interval) { + this.hri = hri; + this.interval = interval; + } + + @Override + public boolean progress() { + long now = EnvironmentEdgeManager.currentTime(); + if (now - lastLog > this.interval) { + LOG.info("Opening " + this.hri.getRegionNameAsString()); + this.lastLog = now; + } + return true; + } + } + + /** + * Creates reference files for top and bottom half of the + * @param hstoreFilesToSplit map of store files to create half file references for. + * @return the number of reference files that were created. + * @throws IOException + */ + private Pair splitStoreFiles( + final Map> hstoreFilesToSplit) + throws IOException { + if (hstoreFilesToSplit == null) { + // Could be null because close didn't succeed -- for now consider it fatal + throw new IOException("Close returned empty list of StoreFiles"); + } + // The following code sets up a thread pool executor with as many slots as + // there's files to split. It then fires up everything, waits for + // completion and finally checks for any exception + int nbFiles = hstoreFilesToSplit.size(); + if (nbFiles == 0) { + // no file needs to be splitted. + return new Pair(0,0); + } + LOG.info("Preparing to split " + nbFiles + " storefiles for region " + this.parent); + ThreadFactoryBuilder builder = new ThreadFactoryBuilder(); + builder.setNameFormat("StoreFileSplitter-%1$d"); + ThreadFactory factory = builder.build(); + ThreadPoolExecutor threadPool = + (ThreadPoolExecutor) Executors.newFixedThreadPool(nbFiles, factory); + List>> futures = new ArrayList>> (nbFiles); + + // Split each store file. + for (Map.Entry> entry: hstoreFilesToSplit.entrySet()) { + for (StoreFile sf: entry.getValue()) { + StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf); + futures.add(threadPool.submit(sfs)); + } + } + // Shutdown the pool + threadPool.shutdown(); + + // Wait for all the tasks to finish + try { + boolean stillRunning = !threadPool.awaitTermination( + this.fileSplitTimeout, TimeUnit.MILLISECONDS); + if (stillRunning) { + threadPool.shutdownNow(); + // wait for the thread to shutdown completely. + while (!threadPool.isTerminated()) { + Thread.sleep(50); + } + throw new IOException("Took too long to split the" + + " files and create the references, aborting split"); + } + } catch (InterruptedException e) { + throw (InterruptedIOException)new InterruptedIOException().initCause(e); + } + + int created_a = 0; + int created_b = 0; + // Look for any exception + for (Future> future : futures) { + try { + Pair p = future.get(); + created_a += p.getFirst() != null ? 1 : 0; + created_b += p.getSecond() != null ? 
1 : 0; + } catch (InterruptedException e) { + throw (InterruptedIOException) new InterruptedIOException().initCause(e); + } catch (ExecutionException e) { + throw new IOException(e); + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Split storefiles for region " + this.parent + " Daugther A: " + created_a + + " storefiles, Daugther B: " + created_b + " storefiles."); + } + return new Pair(created_a, created_b); + } + + private Pair splitStoreFile(final byte[] family, final StoreFile sf) + throws IOException { + HRegionFileSystem fs = this.parent.getRegionFileSystem(); + String familyName = Bytes.toString(family); + Path path_a = + fs.splitStoreFile(this.hri_a, familyName, sf, this.splitrow, false, + this.parent.getSplitPolicy()); + Path path_b = + fs.splitStoreFile(this.hri_b, familyName, sf, this.splitrow, true, + this.parent.getSplitPolicy()); + return new Pair(path_a, path_b); + } + + /** + * Utility class used to do the file splitting / reference writing + * in parallel instead of sequentially. + */ + private class StoreFileSplitter implements Callable> { + private final byte[] family; + private final StoreFile sf; + + /** + * Constructor that takes what it needs to split + * @param family Family that contains the store file + * @param sf which file + */ + public StoreFileSplitter(final byte[] family, final StoreFile sf) { + this.sf = sf; + this.family = family; + } + + public Pair call() throws IOException { + return splitStoreFile(family, sf); + } + } + + @Override + public boolean rollback(final Server server, final RegionServerServices services) + throws IOException { + this.server = server; + this.rsServices = services; + // Coprocessor callback + if (this.parent.getCoprocessorHost() != null) { + this.parent.getCoprocessorHost().preRollBackSplit(); + } + + boolean result = true; + ListIterator iterator = + this.journal.listIterator(this.journal.size()); + // Iterate in reverse. + while (iterator.hasPrevious()) { + JournalEntry je = iterator.previous(); + + transition(je.getPhase(), true); + + switch (je.getPhase()) { + + case SET_SPLITTING: + if (services != null + && !services.reportRegionStateTransition(TransitionCode.SPLIT_REVERTED, + parent.getRegionInfo(), hri_a, hri_b)) { + return false; + } + break; + + case CREATE_SPLIT_DIR: + this.parent.writestate.writesEnabled = true; + this.parent.getRegionFileSystem().cleanupSplitsDir(); + break; + + case CLOSED_PARENT_REGION: + try { + // So, this returns a seqid but if we just closed and then reopened, we + // should be ok. On close, we flushed using sequenceid obtained from + // hosting regionserver so no need to propagate the sequenceid returned + // out of initialize below up into regionserver as we normally do. + // TODO: Verify. + this.parent.initialize(); + } catch (IOException e) { + LOG.error("Failed rollbacking CLOSED_PARENT_REGION of region " + + parent.getRegionInfo().getRegionNameAsString(), e); + throw new RuntimeException(e); + } + break; + + case STARTED_REGION_A_CREATION: + this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_a); + break; + + case STARTED_REGION_B_CREATION: + this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_b); + break; + + case OFFLINED_PARENT: + if (services != null) services.addToOnlineRegions(this.parent); + break; + + case PONR: + // We got to the point-of-no-return so we need to just abort. Return + // immediately. Do not clean up created daughter regions. They need + // to be in place so we don't delete the parent region mistakenly. + // See HBASE-3872. 
+ return false; + + // Informational only cases + case STARTED: + case PREPARED: + case BEFORE_PRE_SPLIT_HOOK: + case AFTER_PRE_SPLIT_HOOK: + case BEFORE_POST_SPLIT_HOOK: + case AFTER_POST_SPLIT_HOOK: + case OPENED_REGION_A: + case OPENED_REGION_B: + case COMPLETED: + break; + + default: + throw new RuntimeException("Unhandled journal entry: " + je); + } + } + // Coprocessor callback + if (this.parent.getCoprocessorHost() != null) { + this.parent.getCoprocessorHost().postRollBackSplit(); + } + return result; + } + + /* package */ HRegionInfo getFirstDaughter() { + return hri_a; + } + + /* package */ HRegionInfo getSecondDaughter() { + return hri_b; + } + + @Override + public List getJournal() { + return journal; + } + + @Override + public SplitTransaction registerTransactionListener(TransactionListener listener) { + listeners.add(listener); + return this; + } + + @Override + public Server getServer() { + return server; + } + + @Override + public RegionServerServices getRegionServerServices() { + return rsServices; + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index 0c420b5ce41..a77fc0e85ca 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; @@ -48,7 +49,7 @@ import org.apache.hadoop.hbase.util.Pair; * Interface for objects that hold a column family in a Region. Its a memstore and a set of zero or * more StoreFiles, which stretch backwards over time. */ -@InterfaceAudience.Private +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving public interface Store extends HeapSize, StoreConfigInformation, PropagatingConfigurationObserver { @@ -63,7 +64,7 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf Collection getStorefiles(); /** - * Close all the readers We don't need to worry about subsequent requests because the HRegion + * Close all the readers We don't need to worry about subsequent requests because the Region * holds a write lock that will prevent any more reads or writes. * @return the {@link StoreFile StoreFiles} that were previously being used. * @throws IOException on failure @@ -213,9 +214,13 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf * Call to complete a compaction. Its for the case where we find in the WAL a compaction * that was not finished. We could find one recovering a WAL after a regionserver crash. * See HBASE-2331. 
- * @param compaction + * @param compaction the descriptor for compaction + * @param pickCompactionFiles whether or not pick up the new compaction output files and + * add it to the store + * @param removeFiles whether to remove/archive files from filesystem */ - void completeCompactionMarker(CompactionDescriptor compaction) + void replayCompactionMarker(CompactionDescriptor compaction, boolean pickCompactionFiles, + boolean removeFiles) throws IOException; // Split oriented methods @@ -237,13 +242,13 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf void assertBulkLoadHFileOk(Path srcPath) throws IOException; /** - * This method should only be called from HRegion. It is assumed that the ranges of values in the + * This method should only be called from Region. It is assumed that the ranges of values in the * HFile fit within the stores assigned region. (assertBulkLoadHFileOk checks this) * * @param srcPathStr * @param sequenceId sequence Id associated with the HFile */ - void bulkLoadHFile(String srcPathStr, long sequenceId) throws IOException; + Path bulkLoadHFile(String srcPathStr, long sequenceId) throws IOException; // General accessors into the state of the store // TODO abstract some of this out into a metrics class @@ -265,8 +270,19 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf */ long getFlushableSize(); + /** + * Returns the memstore snapshot size + * @return size of the memstore snapshot + */ + long getSnapshotSize(); + HColumnDescriptor getFamily(); + /** + * @return The maximum sequence id in all store files. + */ + long getMaxSequenceId(); + /** * @return The maximum memstoreTS in all store files. */ @@ -416,4 +432,14 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf * linear formula. */ double getCompactionPressure(); + + /** + * Replaces the store files that the store has with the given files. Mainly used by + * secondary region replicas to keep up to date with + * the primary region files. 
+ * @throws IOException + */ + void refreshStoreFiles(Collection newFiles) throws IOException; + + void bulkLoadHFile(StoreFileInfo fileInfo) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index 7e087fcf6a6..36a7602694b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -33,7 +33,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -44,6 +43,7 @@ import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.hfile.BlockType; @@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileScanner; -import org.apache.hadoop.hbase.io.hfile.HFileWriterV2; import org.apache.hadoop.hbase.regionserver.compactions.Compactor; import org.apache.hadoop.hbase.util.BloomFilter; import org.apache.hadoop.hbase.util.BloomFilterFactory; @@ -412,7 +411,7 @@ public class StoreFile { } this.reader.setSequenceID(this.sequenceid); - b = metadataMap.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY); + b = metadataMap.get(HFile.Writer.MAX_MEMSTORE_TS_KEY); if (b != null) { this.maxMemstoreTS = Bytes.toLong(b); } @@ -1308,7 +1307,7 @@ public class StoreFile { // columns, a file might be skipped if using row+col Bloom filter. // In order to ensure this file is included an additional check is // required looking only for a row bloom. 
- byte[] rowBloomKey = bloomFilter.createBloomKey(row, 0, row.length, + byte[] rowBloomKey = bloomFilter.createBloomKey(row, rowOffset, rowLen, null, 0, 0); if (keyIsAfterLast diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java index 8f494c00a93..22fd46e9779 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java @@ -422,8 +422,7 @@ public class StoreFileScanner implements KeyValueScanner { KeyValue seekKey = KeyValueUtil.createFirstOnRow(key.getRowArray(), key.getRowOffset(), key.getRowLength()); if (seekCount != null) seekCount.incrementAndGet(); - if (!hfs.seekBefore(seekKey.getBuffer(), seekKey.getKeyOffset(), - seekKey.getKeyLength())) { + if (!hfs.seekBefore(seekKey)) { close(); return false; } @@ -484,4 +483,9 @@ public class StoreFileScanner implements KeyValueScanner { } return true; } + + @Override + public Cell getNextIndexedKey() { + return hfs.getNextIndexedKey(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java index 0c2fe6fd19c..34ba1fa4637 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlushContext.java @@ -64,6 +64,22 @@ interface StoreFlushContext { */ boolean commit(MonitoredTask status) throws IOException; + /** + * Similar to commit, but called in secondary region replicas for replaying the + * flush cache from primary region. Adds the new files to the store, and drops the + * snapshot depending on dropMemstoreSnapshot argument. + * @param fileNames names of the flushed files + * @param dropMemstoreSnapshot whether to drop the prepared memstore snapshot + * @throws IOException + */ + void replayFlush(List fileNames, boolean dropMemstoreSnapshot) throws IOException; + + /** + * Abort the snapshot preparation. Drops the snapshot if any. + * @throws IOException + */ + void abort() throws IOException; + /** * Returns the newly committed files from the flush. 
Called only if commit returns true * @return a list of Paths for new files diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java index ca341646056..bcc0a904d3b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFlusher.java @@ -23,11 +23,11 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.regionserver.compactions.Compactor; @@ -109,10 +109,14 @@ abstract class StoreFlusher { Compactor.CellSink sink, long smallestReadPoint) throws IOException { int compactionKVMax = conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT); + + ScannerContext scannerContext = + ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); + List kvs = new ArrayList(); boolean hasMore; do { - hasMore = scanner.next(kvs, compactionKVMax); + hasMore = scanner.next(kvs, scannerContext); if (!kvs.isEmpty()) { for (Cell c : kvs) { // If we know that this KV is going to be included always, then let us diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index cd8773e20bf..da6ea1cf744 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -30,7 +30,6 @@ import java.util.concurrent.locks.ReentrantLock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -38,10 +37,14 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KVComparator; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.IsolationLevel; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode; +import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; +import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; import org.apache.hadoop.hbase.regionserver.handler.ParallelSeekHandler; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -337,7 +340,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner } scanner.seek(seekKey); Cell c = scanner.peek(); - if (c != null ) { + if (c != null) { totalScannersSoughtBytes += CellUtil.estimatedSerializedSizeOf(c); } } @@ -441,31 +444,39 @@ public class 
StoreScanner extends NonReversedNonLazyKeyValueScanner } } + @Override + public boolean next(List outResult) throws IOException { + return next(outResult, NoLimitScannerContext.getInstance()); + } + /** * Get the next row of values from this Store. * @param outResult - * @param limit + * @param scannerContext * @return true if there are more rows, false if scanner is done */ @Override - public boolean next(List outResult, int limit) throws IOException { + public boolean next(List outResult, ScannerContext scannerContext) throws IOException { lock.lock(); try { + if (scannerContext == null) { + throw new IllegalArgumentException("Scanner context cannot be null"); + } if (checkReseek()) { - return true; + return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); } // if the heap was left null, then the scanners had previously run out anyways, close and // return. if (this.heap == null) { close(); - return false; + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } Cell peeked = this.heap.peek(); if (peeked == null) { close(); - return false; + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } // only call setRow if the row changes; avoids confusing the query matcher @@ -473,12 +484,19 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner byte[] row = peeked.getRowArray(); int offset = peeked.getRowOffset(); short length = peeked.getRowLength(); - if (limit < 0 || matcher.row == null || !Bytes.equals(row, offset, length, matcher.row, - matcher.rowOffset, matcher.rowLength)) { + + // If no limits exists in the scope LimitScope.Between_Cells then we are sure we are changing + // rows. Else it is possible we are still traversing the same row so we must perform the row + // comparison. + if (!scannerContext.hasAnyLimit(LimitScope.BETWEEN_CELLS) || matcher.row == null || + !Bytes.equals(row, offset, length, matcher.row, matcher.rowOffset, matcher.rowLength)) { this.countPerRow = 0; matcher.setRow(row, offset, length); } + // Clear progress away unless invoker has indicated it should be kept. + if (!scannerContext.getKeepProgress()) scannerContext.clearProgress(); + Cell cell; // Only do a sanity-check if store and comparator are available. @@ -494,6 +512,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner prevCell = cell; ScanQueryMatcher.MatchCode qcode = matcher.match(cell); + qcode = optimize(qcode, cell); switch(qcode) { case INCLUDE: case INCLUDE_AND_SEEK_NEXT_ROW: @@ -510,7 +529,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner this.countPerRow > (storeLimit + storeOffset)) { // do what SEEK_NEXT_ROW does. 
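For reference, callers of this reworked next() carry their limits in a ScannerContext instead of a bare int, following the same pattern this patch applies in StoreFlusher and Compactor elsewhere. A sketch of that caller side (scanner and compactionKVMax are assumed to be supplied by the surrounding code):

  // Sketch: drain an InternalScanner in bounded batches using the new API.
  ScannerContext scannerContext =
      ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build();
  List<Cell> cells = new ArrayList<Cell>();
  boolean hasMore;
  do {
    hasMore = scanner.next(cells, scannerContext);
    // process up to compactionKVMax cells per iteration, then reuse the list
    cells.clear();
  } while (hasMore);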
if (!matcher.moreRowsMayExistAfter(cell)) { - return false; + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } seekToNextRow(cell); break LOOP; @@ -520,8 +539,15 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner // also update metric accordingly if (this.countPerRow > storeOffset) { outResult.add(cell); + + // Update local tracking information count++; totalBytesRead += CellUtil.estimatedSerializedSizeOf(cell); + + // Update the progress of the scanner context + scannerContext.incrementSizeProgress(CellUtil.estimatedHeapSizeOfWithoutTags(cell)); + scannerContext.incrementBatchProgress(1); + if (totalBytesRead > maxRowSize) { throw new RowTooBigException("Max row size allowed: " + maxRowSize + ", but the row is bigger than that."); @@ -530,7 +556,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner if (qcode == ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_ROW) { if (!matcher.moreRowsMayExistAfter(cell)) { - return false; + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } seekToNextRow(cell); } else if (qcode == ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_COL) { @@ -539,23 +565,26 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner this.heap.next(); } - if (limit > 0 && (count == limit)) { + if (scannerContext.checkBatchLimit(LimitScope.BETWEEN_CELLS)) { + break LOOP; + } + if (scannerContext.checkSizeLimit(LimitScope.BETWEEN_CELLS)) { break LOOP; } continue; case DONE: - return true; + return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); case DONE_SCAN: close(); - return false; + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); case SEEK_NEXT_ROW: // This is just a relatively simple end of scan fix, to short-cut end // us if there is an endKey in the scan. if (!matcher.moreRowsMayExistAfter(cell)) { - return false; + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } seekToNextRow(cell); @@ -585,20 +614,48 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner } if (count > 0) { - return true; + return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues(); } // No more keys close(); - return false; + return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues(); } finally { lock.unlock(); } } - @Override - public boolean next(List outResult) throws IOException { - return next(outResult, -1); + /* + * See if we should actually SEEK or rather just SKIP to the next Cell. + * (see HBASE-13109) + */ + private ScanQueryMatcher.MatchCode optimize(ScanQueryMatcher.MatchCode qcode, Cell cell) { + Cell nextIndexedKey = getNextIndexedKey(); + if (nextIndexedKey == null || nextIndexedKey == HConstants.NO_NEXT_INDEXED_KEY || + store == null) { + return qcode; + } + switch(qcode) { + case INCLUDE_AND_SEEK_NEXT_COL: + case SEEK_NEXT_COL: + { + if (matcher.compareKeyForNextColumn(nextIndexedKey, cell) >= 0) { + return qcode == MatchCode.SEEK_NEXT_COL ? MatchCode.SKIP : MatchCode.INCLUDE; + } + break; + } + case INCLUDE_AND_SEEK_NEXT_ROW: + case SEEK_NEXT_ROW: + { + if (matcher.compareKeyForNextRow(nextIndexedKey, cell) >= 0) { + return qcode == MatchCode.SEEK_NEXT_ROW ? 
MatchCode.SKIP : MatchCode.INCLUDE; + } + break; + } + default: + break; + } + return qcode; } // Implementation of ChangedReadersObserver @@ -799,5 +856,10 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner public long getEstimatedNumberOfKvsScanned() { return this.kvsScanned; } + + @Override + public Cell getNextIndexedKey() { + return this.heap.getNextIndexedKey(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java index 49183914a8a..a2a0dcccc44 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java @@ -84,8 +84,8 @@ public class StorefileRefresherChore extends ScheduledChore { @Override protected void chore() { - for (HRegion r : regionServer.getOnlineRegionsLocalContext()) { - if (!r.writestate.isReadOnly()) { + for (Region r : regionServer.getOnlineRegionsLocalContext()) { + if (!r.isReadOnly()) { // skip checking for this region if it can accept writes continue; } @@ -98,7 +98,7 @@ public class StorefileRefresherChore extends ScheduledChore { lastRefreshTimes.put(encodedName, time); } try { - for (Store store : r.getStores().values()) { + for (Store store : r.getStores()) { // TODO: some stores might see new data from flush, while others do not which // MIGHT break atomic edits across column families. We can fix this with setting // mvcc read numbers that we know every store has seen @@ -110,12 +110,12 @@ public class StorefileRefresherChore extends ScheduledChore { // Store files have a TTL in the archive directory. If we fail to refresh for that long, we stop serving reads if (isRegionStale(encodedName, time)) { - r.setReadsEnabled(false); // stop serving reads + ((HRegion)r).setReadsEnabled(false); // stop serving reads } continue; } lastRefreshTimes.put(encodedName, time); - r.setReadsEnabled(true); // restart serving reads + ((HRegion)r).setReadsEnabled(true); // restart serving reads } // remove closed regions diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java index 78700407e46..f30a0a34ec9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java @@ -34,11 +34,12 @@ import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.compress.Compression; +import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; -import org.apache.hadoop.hbase.io.hfile.HFileWriterV2; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.ScanType; +import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.StoreFileScanner; @@ -146,7 +147,7 @@ public abstract class Compactor { fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID()); } else { - tmp = 
fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY); + tmp = fileInfo.get(HFile.Writer.MAX_MEMSTORE_TS_KEY); if (tmp != null) { fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp)); } @@ -261,10 +262,13 @@ public abstract class Compactor { store.getRegionInfo().getRegionNameAsString() + "#" + store.getFamily().getNameAsString(); long now = 0; boolean hasMore; + ScannerContext scannerContext = + ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); + throughputController.start(compactionName); try { do { - hasMore = scanner.next(cells, compactionKVMax); + hasMore = scanner.next(cells, scannerContext); if (LOG.isDebugEnabled()) { now = EnvironmentEdgeManager.currentTime(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java index dbc45e7f8d9..26e824de6ae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java @@ -90,7 +90,7 @@ public class CloseRegionHandler extends EventHandler { LOG.debug("Processing close of " + name); String encodedRegionName = regionInfo.getEncodedName(); // Check that this region is being served here - HRegion region = this.rsServices.getFromOnlineRegions(encodedRegionName); + HRegion region = (HRegion)rsServices.getFromOnlineRegions(encodedRegionName); if (region == null) { LOG.warn("Received CLOSE for region " + name + " but currently not serving - ignoring"); // TODO: do better than a simple warning @@ -119,7 +119,7 @@ public class CloseRegionHandler extends EventHandler { rsServices.reportRegionStateTransition(TransitionCode.CLOSED, regionInfo); // Done! Region is closed on this RS - LOG.debug("Closed " + region.getRegionNameAsString()); + LOG.debug("Closed " + region.getRegionInfo().getRegionNameAsString()); } finally { this.rsServices.getRegionsInTransitionInRS(). remove(this.regionInfo.getEncodedNameAsBytes(), Boolean.FALSE); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/FinishRegionRecoveringHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/FinishRegionRecoveringHandler.java new file mode 100644 index 00000000000..19838d3648c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/FinishRegionRecoveringHandler.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.regionserver.handler; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.executor.EventHandler; +import org.apache.hadoop.hbase.executor.EventType; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; + +public class FinishRegionRecoveringHandler extends EventHandler { + private static final Log LOG = LogFactory.getLog(FinishRegionRecoveringHandler.class); + + protected final RegionServerServices rss; + protected final String regionName; + protected final String path; + + public FinishRegionRecoveringHandler(RegionServerServices rss, + String regionName, String path) { + // we are using the open region handlers, since this operation is in the region open lifecycle + super(rss, EventType.M_RS_OPEN_REGION); + this.rss = rss; + this.regionName = regionName; + this.path = path; + } + + @Override + public void process() throws IOException { + Region region = this.rss.getRecoveringRegions().remove(regionName); + if (region != null) { + ((HRegion)region).setRecovering(false); + LOG.info(path + " deleted; " + regionName + " recovered."); + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java new file mode 100644 index 00000000000..e0921b09256 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java @@ -0,0 +1,187 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.regionserver.handler; + +import java.io.IOException; +import java.io.InterruptedIOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.FlushRegionCallable; +import org.apache.hadoop.hbase.client.RegionReplicaUtil; +import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; +import org.apache.hadoop.hbase.executor.EventHandler; +import org.apache.hadoop.hbase.executor.EventType; +import org.apache.hadoop.hbase.ipc.RpcControllerFactory; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.util.RetryCounter; +import org.apache.hadoop.hbase.util.RetryCounterFactory; +import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; + +/** + * HBASE-11580: With the async wal approach (HBASE-11568), the edits are not persisted to wal in + * secondary region replicas. This means that a secondary region replica can serve some edits from + * it's memstore that that is still not flushed from primary. We do not want to allow secondary + * region's seqId to go back in time, when this secondary region is opened elsewhere after a + * crash or region move. We will trigger a flush cache in the primary region replica and wait + * for observing a complete flush cycle before marking the region readsEnabled. This handler does + * the flushing of the primary region replica and ensures that regular region opening is not + * blocked while the secondary replica is blocked on flush. + */ +@InterfaceAudience.Private +public class RegionReplicaFlushHandler extends EventHandler { + + private static final Log LOG = LogFactory.getLog(RegionReplicaFlushHandler.class); + + private final ClusterConnection connection; + private final RpcRetryingCallerFactory rpcRetryingCallerFactory; + private final RpcControllerFactory rpcControllerFactory; + private final int operationTimeout; + private final HRegion region; + + public RegionReplicaFlushHandler(Server server, ClusterConnection connection, + RpcRetryingCallerFactory rpcRetryingCallerFactory, RpcControllerFactory rpcControllerFactory, + int operationTimeout, HRegion region) { + super(server, EventType.RS_REGION_REPLICA_FLUSH); + this.connection = connection; + this.rpcRetryingCallerFactory = rpcRetryingCallerFactory; + this.rpcControllerFactory = rpcControllerFactory; + this.operationTimeout = operationTimeout; + this.region = region; + } + + @Override + public void process() throws IOException { + triggerFlushInPrimaryRegion(region); + } + + @Override + protected void handleException(Throwable t) { + super.handleException(t); + + if (t instanceof InterruptedIOException || t instanceof InterruptedException) { + // ignore + } else if (t instanceof RuntimeException) { + server.abort("ServerAborting because a runtime exception was thrown", t); + } else { + // something fishy since we cannot flush the primary region until all retries (retries from + // rpc times 35 trigger). We cannot close the region since there is no such mechanism to + // close a region without master triggering it. We just abort the server for now. 
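+      // For reference, getRetriesCount() below undoes the server-side retry multiplier
+      // (default 10), so a multiplied setting such as 350 comes back down to the stock 35
+      // attempts before this abort path is reached.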
+ server.abort("ServerAborting because an exception was thrown", t); + } + } + + private int getRetriesCount(Configuration conf) { + int numRetries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); + if (numRetries > 10) { + int mult = conf.getInt("hbase.client.serverside.retries.multiplier", 10); + numRetries = numRetries / mult; // reset if HRS has multiplied this already + } + return numRetries; + } + + void triggerFlushInPrimaryRegion(final HRegion region) throws IOException, RuntimeException { + long pause = connection.getConfiguration().getLong(HConstants.HBASE_CLIENT_PAUSE, + HConstants.DEFAULT_HBASE_CLIENT_PAUSE); + + int maxAttempts = getRetriesCount(connection.getConfiguration()); + RetryCounter counter = new RetryCounterFactory(maxAttempts, (int)pause).create(); + + if (LOG.isDebugEnabled()) { + LOG.debug("Attempting to do an RPC to the primary region replica " + ServerRegionReplicaUtil + .getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName() + " of region " + + region.getRegionInfo().getEncodedName() + " to trigger a flush"); + } + while (!region.isClosing() && !region.isClosed() + && !server.isAborted() && !server.isStopped()) { + FlushRegionCallable flushCallable = new FlushRegionCallable( + connection, rpcControllerFactory, + RegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo()), true); + + // TODO: flushRegion() is a blocking call waiting for the flush to complete. Ideally we + // do not have to wait for the whole flush here, just initiate it. + FlushRegionResponse response = null; + try { + response = rpcRetryingCallerFactory.newCaller() + .callWithRetries(flushCallable, this.operationTimeout); + } catch (IOException ex) { + if (ex instanceof TableNotFoundException + || connection.isTableDisabled(region.getRegionInfo().getTable())) { + return; + } + throw ex; + } + + if (response.getFlushed()) { + // then we have to wait for seeing the flush entry. All reads will be rejected until we see + // a complete flush cycle or replay a region open event + if (LOG.isDebugEnabled()) { + LOG.debug("Successfully triggered a flush of primary region replica " + + ServerRegionReplicaUtil + .getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName() + + " of region " + region.getRegionInfo().getEncodedName() + + " Now waiting and blocking reads until observing a full flush cycle"); + } + break; + } else { + if (response.hasWroteFlushWalMarker()) { + if(response.getWroteFlushWalMarker()) { + if (LOG.isDebugEnabled()) { + LOG.debug("Successfully triggered an empty flush marker(memstore empty) of primary " + + "region replica " + ServerRegionReplicaUtil + .getRegionInfoForDefaultReplica(region.getRegionInfo()).getEncodedName() + + " of region " + region.getRegionInfo().getEncodedName() + " Now waiting and " + + "blocking reads until observing a flush marker"); + } + break; + } else { + // somehow we were not able to get the primary to write the flush request. It may be + // closing or already flushing. Retry flush again after some sleep. + if (!counter.shouldRetry()) { + throw new IOException("Cannot cause primary to flush or drop a wal marker after " + + "retries. Failing opening of this region replica " + + region.getRegionInfo().getEncodedName()); + } + } + } else { + // nothing to do. Are we dealing with an old server? + LOG.warn("Was not able to trigger a flush from primary region due to old server version? 
" + + "Continuing to open the secondary region replica: " + + region.getRegionInfo().getEncodedName()); + region.setReadsEnabled(true); + break; + } + } + try { + counter.sleepUntilNextRetry(); + } catch (InterruptedException e) { + throw new InterruptedIOException(e.getMessage()); + } + } + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java index 611e432ddc8..998c1fb3d7d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/FlushSnapshotSubprocedure.java @@ -30,6 +30,8 @@ import org.apache.hadoop.hbase.procedure.ProcedureMember; import org.apache.hadoop.hbase.procedure.Subprocedure; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager.SnapshotSubprocedurePool; import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; @@ -45,14 +47,14 @@ import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; public class FlushSnapshotSubprocedure extends Subprocedure { private static final Log LOG = LogFactory.getLog(FlushSnapshotSubprocedure.class); - private final List regions; + private final List regions; private final SnapshotDescription snapshot; private final SnapshotSubprocedurePool taskManager; private boolean snapshotSkipFlush = false; public FlushSnapshotSubprocedure(ProcedureMember member, ForeignExceptionDispatcher errorListener, long wakeFrequency, long timeout, - List regions, SnapshotDescription snapshot, + List regions, SnapshotDescription snapshot, SnapshotSubprocedurePool taskManager) { super(member, snapshot.getName(), errorListener, wakeFrequency, timeout); this.snapshot = snapshot; @@ -68,8 +70,8 @@ public class FlushSnapshotSubprocedure extends Subprocedure { * Callable for adding files to snapshot manifest working dir. Ready for multithreading. */ private class RegionSnapshotTask implements Callable { - HRegion region; - RegionSnapshotTask(HRegion region) { + Region region; + RegionSnapshotTask(Region region) { this.region = region; } @@ -94,9 +96,9 @@ public class FlushSnapshotSubprocedure extends Subprocedure { LOG.debug("take snapshot without flush memstore first"); } else { LOG.debug("Flush Snapshotting region " + region.toString() + " started..."); - region.flushcache(); + region.flush(true); } - region.addRegionToSnapshot(snapshot, monitor); + ((HRegion)region).addRegionToSnapshot(snapshot, monitor); if (snapshotSkipFlush) { LOG.debug("... SkipFlush Snapshotting region " + region.toString() + " completed."); } else { @@ -126,7 +128,7 @@ public class FlushSnapshotSubprocedure extends Subprocedure { } // Add all hfiles already existing in region. - for (HRegion region : regions) { + for (Region region : regions) { // submit one task per region for parallelize by region. 
taskManager.submitTask(new RegionSnapshotTask(region)); monitor.rethrowException(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java index 93d836d761d..021c16f0c5b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java @@ -49,8 +49,8 @@ import org.apache.hadoop.hbase.procedure.Subprocedure; import org.apache.hadoop.hbase.procedure.SubprocedureFactory; import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.zookeeper.KeeperException; @@ -160,7 +160,7 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { // check to see if this server is hosting any regions for the snapshots // check to see if we have regions for the snapshot - List involvedRegions; + List involvedRegions; try { involvedRegions = getRegionsToSnapshot(snapshot); } catch (IOException e1) { @@ -220,12 +220,12 @@ public class RegionServerSnapshotManager extends RegionServerProcedureManager { * the given snapshot. * @throws IOException */ - private List getRegionsToSnapshot(SnapshotDescription snapshot) throws IOException { - List onlineRegions = rss.getOnlineRegions(TableName.valueOf(snapshot.getTable())); - Iterator iterator = onlineRegions.iterator(); + private List getRegionsToSnapshot(SnapshotDescription snapshot) throws IOException { + List onlineRegions = rss.getOnlineRegions(TableName.valueOf(snapshot.getTable())); + Iterator iterator = onlineRegions.iterator(); // remove the non-default regions while (iterator.hasNext()) { - HRegion r = iterator.next(); + Region r = iterator.next(); if (!RegionReplicaUtil.isDefaultReplica(r.getRegionInfo())) { iterator.remove(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java index 147a13de59d..1ea9d4f43d4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java @@ -56,6 +56,7 @@ class FSWALEntry extends Entry { private final transient HTableDescriptor htd; private final transient HRegionInfo hri; private final transient List memstoreCells; + private final Set familyNames; FSWALEntry(final long sequence, final WALKey key, final WALEdit edit, final AtomicLong referenceToRegionSequenceId, final boolean inMemstore, @@ -67,6 +68,23 @@ class FSWALEntry extends Entry { this.hri = hri; this.sequence = sequence; this.memstoreCells = memstoreCells; + if (inMemstore) { + // construct familyNames here to reduce the work of log sinker. + ArrayList cells = this.getEdit().getCells(); + if (CollectionUtils.isEmpty(cells)) { + this.familyNames = Collections. 
emptySet(); + } else { + Set familySet = Sets.newTreeSet(Bytes.BYTES_COMPARATOR); + for (Cell cell : cells) { + if (!CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) { + familySet.add(CellUtil.cloneFamily(cell)); + } + } + this.familyNames = Collections.unmodifiableSet(familySet); + } + } else { + this.familyNames = Collections. emptySet(); + } } public String toString() { @@ -118,16 +136,6 @@ class FSWALEntry extends Entry { * @return the family names which are effected by this edit. */ Set getFamilyNames() { - ArrayList cells = this.getEdit().getCells(); - if (CollectionUtils.isEmpty(cells)) { - return Collections.emptySet(); - } - Set familySet = Sets.newTreeSet(Bytes.BYTES_COMPARATOR); - for (Cell cell : cells) { - if (!CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) { - familySet.add(CellUtil.cloneFamily(cell)); - } - } - return familySet; + return familyNames; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java index 285f69b6a40..af8b5bbcdb8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java @@ -91,6 +91,9 @@ public class ProtobufLogReader extends ReaderBase { static { writerClsNames.add(ProtobufLogWriter.class.getSimpleName()); } + + // cell codec classname + private String codecClsName = null; enum WALHdrResult { EOF, // stream is at EOF when method starts @@ -153,9 +156,16 @@ public class ProtobufLogReader extends ReaderBase { /* * Returns names of the accepted writer classes */ - protected List getWriterClsNames() { + public List getWriterClsNames() { return writerClsNames; } + + /* + * Returns the cell codec classname + */ + public String getCodecClsName() { + return codecClsName; + } protected WALHdrContext readHeader(Builder builder, FSDataInputStream stream) throws IOException { @@ -207,6 +217,9 @@ public class ProtobufLogReader extends ReaderBase { LOG.trace("After reading the trailer: walEditsStopOffset: " + this.walEditsStopOffset + ", fileLength: " + this.fileLength + ", " + "trailerPresent: " + trailerPresent); } + + codecClsName = hdrCtxt.getCellCodecClsName(); + return hdrCtxt.getCellCodecClsName(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java new file mode 100644 index 00000000000..55c057b470b --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.regionserver.wal; + +import java.io.IOException; +import java.util.List; +import java.util.UUID; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +/** + * An HLogKey specific to WalEdits coming from replay. + */ +@InterfaceAudience.Private +public class ReplayHLogKey extends HLogKey { + + public ReplayHLogKey(final byte [] encodedRegionName, final TableName tablename, + final long now, List clusterIds, long nonceGroup, long nonce) { + super(encodedRegionName, tablename, now, clusterIds, nonceGroup, nonce); + } + + public ReplayHLogKey(final byte [] encodedRegionName, final TableName tablename, + long logSeqNum, final long now, List clusterIds, long nonceGroup, long nonce) { + super(encodedRegionName, tablename, logSeqNum, now, clusterIds, nonceGroup, nonce); + } + + /** + * Returns the original sequence id + * @return long the new assigned sequence number + * @throws InterruptedException + */ + @Override + public long getSequenceId() throws IOException { + return this.getOrigLogSeqNum(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java index 041dfe2683a..0d052d4c068 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogReader.java @@ -51,7 +51,7 @@ public class SecureProtobufLogReader extends ProtobufLogReader { } @Override - protected List getWriterClsNames() { + public List getWriterClsNames() { return writerClsNames; } @@ -78,7 +78,7 @@ public class SecureProtobufLogReader extends ProtobufLogReader { // First try the WAL key, if one is configured if (walKeyName != null) { try { - key = EncryptionUtil.unwrapKey(conf, walKeyName, keyBytes); + key = EncryptionUtil.unwrapWALKey(conf, walKeyName, keyBytes); } catch (KeyException e) { if (LOG.isDebugEnabled()) { LOG.debug("Unable to unwrap key with WAL key '" + walKeyName + "'"); @@ -91,7 +91,7 @@ public class SecureProtobufLogReader extends ProtobufLogReader { User.getCurrent().getShortName()); try { // Then, try the cluster master key - key = EncryptionUtil.unwrapKey(conf, masterKeyName, keyBytes); + key = EncryptionUtil.unwrapWALKey(conf, masterKeyName, keyBytes); } catch (KeyException e) { // If the current master key fails to unwrap, try the alternate, if // one is configured @@ -102,7 +102,7 @@ public class SecureProtobufLogReader extends ProtobufLogReader { conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY); if (alternateKeyName != null) { try { - key = EncryptionUtil.unwrapKey(conf, alternateKeyName, keyBytes); + key = EncryptionUtil.unwrapWALKey(conf, alternateKeyName, keyBytes); } catch (KeyException ex) { throw new IOException(ex); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java index e850485bb95..c352770a1c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureProtobufLogWriter.java @@ -43,8 +43,6 @@ import org.apache.hadoop.hbase.security.User; public class SecureProtobufLogWriter extends ProtobufLogWriter { 
private static final Log LOG = LogFactory.getLog(SecureProtobufLogWriter.class); - private static final String DEFAULT_CIPHER = "AES"; - private Encryptor encryptor = null; @Override @@ -56,7 +54,8 @@ public class SecureProtobufLogWriter extends ProtobufLogWriter { EncryptionTest.testCipherProvider(conf); // Get an instance of our cipher - final String cipherName = conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, DEFAULT_CIPHER); + final String cipherName = + conf.get(HConstants.CRYPTO_WAL_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Cipher cipher = Encryption.getCipher(conf, cipherName); if (cipher == null) { throw new RuntimeException("Cipher '" + cipherName + "' is not available"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java index 39d05362f71..5d0573fdbd4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java @@ -126,6 +126,15 @@ public class WALEdit implements Writable, HeapSize { return CellUtil.matchingFamily(cell, METAFAMILY); } + public boolean isMetaEdit() { + for (Cell cell: cells) { + if (!isMetaEditFamily(cell)) { + return false; + } + } + return true; + } + /** * @return True when current WALEdit is created by log replay. Replication skips WALEdits from * replay. @@ -345,7 +354,7 @@ public class WALEdit implements Writable, HeapSize { bulkLoadDescriptor.toByteArray()); return new WALEdit().add(kv); } - + /** * Deserialized and returns a BulkLoadDescriptor from the passed in Cell * @param cell the key value @@ -357,4 +366,4 @@ public class WALEdit implements Writable, HeapSize { } return null; } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseWALEntryFilter.java new file mode 100644 index 00000000000..42b3b7bc39b --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/BaseWALEntryFilter.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.replication; + +import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +/** + * A base class WALEntryFilter implementations. Protects against changes in the interface signature. 
+ */ +@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) +public abstract class BaseWALEntryFilter implements WALEntryFilter { +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java index c3ec976ac27..978e85326fc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.replication.regionserver.MetricsSource; @@ -51,6 +52,7 @@ public interface ReplicationEndpoint extends Service { class Context { private final Configuration conf; private final FileSystem fs; + private final TableDescriptors tableDescriptors; private final ReplicationPeerConfig peerConfig; private final ReplicationPeer replicationPeer; private final String peerId; @@ -65,7 +67,8 @@ public interface ReplicationEndpoint extends Service { final String peerId, final UUID clusterId, final ReplicationPeer replicationPeer, - final MetricsSource metrics) { + final MetricsSource metrics, + final TableDescriptors tableDescriptors) { this.peerConfig = peerConfig; this.conf = conf; this.fs = fs; @@ -73,6 +76,7 @@ public interface ReplicationEndpoint extends Service { this.peerId = peerId; this.replicationPeer = replicationPeer; this.metrics = metrics; + this.tableDescriptors = tableDescriptors; } public Configuration getConfiguration() { return conf; @@ -95,6 +99,9 @@ public interface ReplicationEndpoint extends Service { public MetricsSource getMetrics() { return metrics; } + public TableDescriptors getTableDescriptors() { + return tableDescriptors; + } } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java index 397044d6f7f..1a53c2475eb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java @@ -30,8 +30,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface; import org.apache.hadoop.hbase.wal.WAL.Entry; @@ -76,13 +76,13 @@ public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoi super.init(context); this.conf = HBaseConfiguration.create(ctx.getConfiguration()); decorateConf(); - this.maxRetriesMultiplier = this.conf.getInt("replication.source.maxretriesmultiplier", 10); + this.maxRetriesMultiplier = 
this.conf.getInt("replication.source.maxretriesmultiplier", 300); this.socketTimeoutMultiplier = this.conf.getInt("replication.source.socketTimeoutMultiplier", - maxRetriesMultiplier * maxRetriesMultiplier); + maxRetriesMultiplier); // TODO: This connection is replication specific or we should make it particular to // replication and make replication specific settings such as compression or codec to use // passing Cells. - this.conn = HConnectionManager.createConnection(this.conf); + this.conn = (HConnection) ConnectionFactory.createConnection(this.conf); this.sleepForRetries = this.conf.getLong("replication.source.sleepforretries", 1000); this.metrics = context.getMetrics(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java index 0c9d0169124..37dc1dd4e4e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSink.java @@ -71,4 +71,21 @@ public class MetricsSink { mss.incrAppliedOps(batchSize); } + /** + * Get the Age of Last Applied Op + * @return ageOfLastAppliedOp + */ + public long getAgeOfLastAppliedOp() { + return mss.getLastAppliedOpAge(); + } + + /** + * Get the TimeStampOfLastAppliedOp. If no replication Op applied yet, the value is the timestamp + * at which hbase instance starts + * @return timeStampsOfLastAppliedOp; + */ + public long getTimeStampOfLastAppliedOp() { + return this.lastTimestampForAge; + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java index a734b9ce07f..04c3d2d4b8d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java @@ -36,6 +36,7 @@ public class MetricsSource { private long lastTimestamp = 0; private int lastQueueSize = 0; + private String id; private final MetricsReplicationSourceSource singleSourceSource; private final MetricsReplicationSourceSource globalSourceSource; @@ -46,6 +47,7 @@ public class MetricsSource { * @param id Name of the source this class is monitoring */ public MetricsSource(String id) { + this.id = id; singleSourceSource = CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class) .getSource(id); @@ -105,7 +107,7 @@ public class MetricsSource { * * @param delta the number filtered. 
*/ - private void incrLogEditsFiltered(long delta) { + public void incrLogEditsFiltered(long delta) { singleSourceSource.incrLogEditsFiltered(delta); globalSourceSource.incrLogEditsFiltered(delta); } @@ -143,4 +145,36 @@ public class MetricsSource { globalSourceSource.decrSizeOfLogQueue(lastQueueSize); lastQueueSize = 0; } + + /** + * Get AgeOfLastShippedOp + * @return AgeOfLastShippedOp + */ + public Long getAgeOfLastShippedOp() { + return singleSourceSource.getLastShippedAge(); + } + + /** + * Get the sizeOfLogQueue + * @return sizeOfLogQueue + */ + public int getSizeOfLogQueue() { + return this.lastQueueSize; + } + + /** + * Get the timeStampsOfLastShippedOp + * @return lastTimestampForAge + */ + public long getTimeStampOfLastShippedOp() { + return lastTimestamp; + } + + /** + * Get the slave peer ID + * @return peerID + */ + public String getPeerID() { + return id; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java index c3d4e5a5cc6..c75f81f1b77 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java @@ -38,20 +38,20 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionAdminServiceCallable; import org.apache.hadoop.hbase.client.ClusterConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.RegionReplicaUtil; -import org.apache.hadoop.hbase.client.RetriesExhaustedException; import org.apache.hadoop.hbase.client.RetryingCallable; import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory; import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController; @@ -60,13 +60,14 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse; -import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALSplitter.EntryBuffers; import org.apache.hadoop.hbase.wal.WALSplitter.OutputSink; import org.apache.hadoop.hbase.wal.WALSplitter.PipelineController; import org.apache.hadoop.hbase.wal.WALSplitter.RegionEntryBuffer; import org.apache.hadoop.hbase.wal.WALSplitter.SinkWriter; +import org.apache.hadoop.hbase.replication.BaseWALEntryFilter; +import org.apache.hadoop.hbase.replication.ChainWALEntryFilter; import 
org.apache.hadoop.hbase.replication.HBaseReplicationEndpoint; import org.apache.hadoop.hbase.replication.WALEntryFilter; import org.apache.hadoop.hbase.util.Bytes; @@ -76,11 +77,12 @@ import org.apache.hadoop.util.StringUtils; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; +import com.google.common.collect.Lists; import com.google.protobuf.ServiceException; /** - * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} endpoint - * which receives the WAL edits from the WAL, and sends the edits to replicas + * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} endpoint + * which receives the WAL edits from the WAL, and sends the edits to replicas * of regions. */ @InterfaceAudience.Private @@ -88,8 +90,13 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { private static final Log LOG = LogFactory.getLog(RegionReplicaReplicationEndpoint.class); + // Can be configured differently than hbase.client.retries.number + private static String CLIENT_RETRIES_NUMBER + = "hbase.region.replica.replication.client.retries.number"; + private Configuration conf; private ClusterConnection connection; + private TableDescriptors tableDescriptors; // Reuse WALSplitter constructs as a WAL pipe private PipelineController controller; @@ -103,15 +110,64 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { private ExecutorService pool; + /** + * Skips the entries which has original seqId. Only entries persisted via distributed log replay + * have their original seq Id fields set. + */ + private class SkipReplayedEditsFilter extends BaseWALEntryFilter { + @Override + public Entry filter(Entry entry) { + // if orig seq id is set, skip replaying the entry + if (entry.getKey().getOrigLogSeqNum() > 0) { + return null; + } + return entry; + } + } + + @Override + public WALEntryFilter getWALEntryfilter() { + WALEntryFilter superFilter = super.getWALEntryfilter(); + WALEntryFilter skipReplayedEditsFilter = getSkipReplayedEditsFilter(); + + if (superFilter == null) { + return skipReplayedEditsFilter; + } + + if (skipReplayedEditsFilter == null) { + return superFilter; + } + + ArrayList filters = Lists.newArrayList(); + filters.add(superFilter); + filters.add(skipReplayedEditsFilter); + return new ChainWALEntryFilter(filters); + } + + protected WALEntryFilter getSkipReplayedEditsFilter() { + return new SkipReplayedEditsFilter(); + } + @Override public void init(Context context) throws IOException { super.init(context); this.conf = HBaseConfiguration.create(context.getConfiguration()); + this.tableDescriptors = context.getTableDescriptors(); - String codecClassName = conf - .get(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName()); - conf.set(HConstants.RPC_CODEC_CONF_KEY, codecClassName); + // HRS multiplies client retries by 10 globally for meta operations, but we do not want this. + // We are resetting it here because we want default number of retries (35) rather than 10 times + // that which makes very long retries for disabled tables etc. 
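+    // Worked example: a region server that has already applied the x10 multiplier to the
+    // stock 35 retries reports hbase.client.retries.number as 350; dividing by
+    // "hbase.client.serverside.retries.multiplier" (default 10) restores the intended 35.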
+ int defaultNumRetries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); + if (defaultNumRetries > 10) { + int mult = conf.getInt("hbase.client.serverside.retries.multiplier", 10); + defaultNumRetries = defaultNumRetries / mult; // reset if HRS has multiplied this already + } + + conf.setInt("hbase.client.serverside.retries.multiplier", 1); + int numRetries = conf.getInt(CLIENT_RETRIES_NUMBER, defaultNumRetries); + conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, numRetries); this.numWriterThreads = this.conf.getInt( "hbase.region.replica.replication.writer.threads", 3); @@ -128,10 +184,10 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { @Override protected void doStart() { try { - connection = (ClusterConnection) HConnectionManager.createConnection(ctx.getConfiguration()); + connection = (ClusterConnection) ConnectionFactory.createConnection(this.conf); this.pool = getDefaultThreadPool(conf); - outputSink = new RegionReplicaOutputSink(controller, entryBuffers, connection, pool, - numWriterThreads, operationTimeout); + outputSink = new RegionReplicaOutputSink(controller, tableDescriptors, entryBuffers, + connection, pool, numWriterThreads, operationTimeout); outputSink.startWriterThreads(); super.doStart(); } catch (IOException ex) { @@ -196,7 +252,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { keepAliveTime, TimeUnit.SECONDS, workQueue, - Threads.newDaemonThreadFactory(this.getClass().toString() + "-rpc-shared-")); + Threads.newDaemonThreadFactory(this.getClass().getSimpleName() + "-rpc-shared-")); tpe.allowCoreThreadTimeOut(true); return tpe; } @@ -232,6 +288,8 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { entryBuffers.appendEntry(entry); } outputSink.flush(); // make sure everything is flushed + ctx.getMetrics().incrLogEditsFiltered( + outputSink.getSkippedEditsCounter().getAndSet(0)); return true; } catch (InterruptedException e) { Thread.currentThread().interrupt(); @@ -257,12 +315,28 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { } static class RegionReplicaOutputSink extends OutputSink { - private RegionReplicaSinkWriter sinkWriter; + private final RegionReplicaSinkWriter sinkWriter; + private final TableDescriptors tableDescriptors; + private final Cache memstoreReplicationEnabled; - public RegionReplicaOutputSink(PipelineController controller, EntryBuffers entryBuffers, - ClusterConnection connection, ExecutorService pool, int numWriters, int operationTimeout) { + public RegionReplicaOutputSink(PipelineController controller, TableDescriptors tableDescriptors, + EntryBuffers entryBuffers, ClusterConnection connection, ExecutorService pool, + int numWriters, int operationTimeout) { super(controller, entryBuffers, numWriters); this.sinkWriter = new RegionReplicaSinkWriter(this, connection, pool, operationTimeout); + this.tableDescriptors = tableDescriptors; + + // A cache for the table "memstore replication enabled" flag. + // It has a default expiry of 5 sec. This means that if the table is altered + // with a different flag value, we might miss to replicate for that amount of + // time. But this cache avoid the slow lookup and parsing of the TableDescriptor. 
+ int memstoreReplicationEnabledCacheExpiryMs = connection.getConfiguration() + .getInt("hbase.region.replica.replication.cache.memstoreReplicationEnabled.expiryMs", 5000); + this.memstoreReplicationEnabled = CacheBuilder.newBuilder() + .expireAfterWrite(memstoreReplicationEnabledCacheExpiryMs, TimeUnit.MILLISECONDS) + .initialCapacity(10) + .maximumSize(1000) + .build(); } @Override @@ -273,6 +347,12 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { return; } + // meta edits (e.g. flush) are always replicated. + // data edits (e.g. put) are replicated if the table requires them. + if (!requiresReplication(buffer.getTableName(), entries)) { + return; + } + sinkWriter.append(buffer.getTableName(), buffer.getEncodedRegionName(), entries.get(0).getEdit().getCells().get(0).getRow(), entries); } @@ -287,7 +367,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { @Override public List finishWritingAndClose() throws IOException { - finishWriting(); + finishWriting(true); return null; } @@ -304,6 +384,44 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { AtomicLong getSkippedEditsCounter() { return skippedEdits; } + + /** + * returns true if the specified entry must be replicated. + * We should always replicate meta operations (e.g. flush) + * and use the user HTD flag to decide whether or not replicate the memstore. + */ + private boolean requiresReplication(final TableName tableName, final List entries) + throws IOException { + // unit-tests may not the TableDescriptors, bypass the check and always replicate + if (tableDescriptors == null) return true; + + Boolean requiresReplication = memstoreReplicationEnabled.getIfPresent(tableName); + if (requiresReplication == null) { + // check if the table requires memstore replication + // some unit-test drop the table, so we should do a bypass check and always replicate. + HTableDescriptor htd = tableDescriptors.get(tableName); + requiresReplication = htd == null || htd.hasRegionMemstoreReplication(); + memstoreReplicationEnabled.put(tableName, requiresReplication); + } + + // if memstore replication is not required, check the entries. + // meta edits (e.g. flush) must be always replicated. + if (!requiresReplication) { + int skipEdits = 0; + java.util.Iterator it = entries.iterator(); + while (it.hasNext()) { + Entry entry = it.next(); + if (entry.getEdit().isMetaEdit()) { + requiresReplication = true; + } else { + it.remove(); + skipEdits++; + } + } + skippedEdits.addAndGet(skipEdits); + } + return requiresReplication; + } } static class RegionReplicaSinkWriter extends SinkWriter { @@ -341,24 +459,68 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { List entries) throws IOException { if (disabledAndDroppedTables.getIfPresent(tableName) != null) { - sink.getSkippedEditsCounter().incrementAndGet(); + if (LOG.isTraceEnabled()) { + LOG.trace("Skipping " + entries.size() + " entries because table " + tableName + + " is cached as a disabled or dropped table"); + for (Entry entry : entries) { + LOG.trace("Skipping : " + entry); + } + } + sink.getSkippedEditsCounter().addAndGet(entries.size()); return; } - // get the replicas of the primary region + // If the table is disabled or dropped, we should not replay the entries, and we can skip + // replaying them. 
However, we might not know whether the table is disabled until we + // invalidate the cache and check from meta RegionLocations locations = null; - try { - locations = getRegionLocations(connection, tableName, row, true, 0); + boolean useCache = true; + while (true) { + // get the replicas of the primary region + try { + locations = RegionReplicaReplayCallable + .getRegionLocations(connection, tableName, row, useCache, 0); - if (locations == null) { - throw new HBaseIOException("Cannot locate locations for " - + tableName + ", row:" + Bytes.toStringBinary(row)); + if (locations == null) { + throw new HBaseIOException("Cannot locate locations for " + + tableName + ", row:" + Bytes.toStringBinary(row)); + } + } catch (TableNotFoundException e) { + if (LOG.isTraceEnabled()) { + LOG.trace("Skipping " + entries.size() + " entries because table " + tableName + + " is dropped. Adding table to cache."); + for (Entry entry : entries) { + LOG.trace("Skipping : " + entry); + } + } + disabledAndDroppedTables.put(tableName, Boolean.TRUE); // put to cache. Value ignored + // skip this entry + sink.getSkippedEditsCounter().addAndGet(entries.size()); + return; } - } catch (TableNotFoundException e) { - disabledAndDroppedTables.put(tableName, Boolean.TRUE); // put to cache. Value ignored - // skip this entry - sink.getSkippedEditsCounter().addAndGet(entries.size()); - return; + + // check whether we should still replay this entry. If the regions are changed, or the + // entry is not coming from the primary region, filter it out. + HRegionLocation primaryLocation = locations.getDefaultRegionLocation(); + if (!Bytes.equals(primaryLocation.getRegionInfo().getEncodedNameAsBytes(), + encodedRegionName)) { + if (useCache) { + useCache = false; + continue; // this will retry location lookup + } + if (LOG.isTraceEnabled()) { + LOG.trace("Skipping " + entries.size() + " entries in table " + tableName + + " because located region " + primaryLocation.getRegionInfo().getEncodedName() + + " is different than the original region " + Bytes.toStringBinary(encodedRegionName) + + " from WALEdit"); + for (Entry entry : entries) { + LOG.trace("Skipping : " + entry); + } + } + sink.getSkippedEditsCounter().addAndGet(entries.size()); + return; + } + break; } if (locations.size() == 1) { @@ -366,17 +528,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { } ArrayList> tasks - = new ArrayList>(2); - - // check whether we should still replay this entry. If the regions are changed, or the - // entry is not coming form the primary region, filter it out. - HRegionLocation primaryLocation = locations.getDefaultRegionLocation(); - if (!Bytes.equals(primaryLocation.getRegionInfo().getEncodedNameAsBytes(), - encodedRegionName)) { - sink.getSkippedEditsCounter().addAndGet(entries.size()); - return; - } - + = new ArrayList>(locations.size() - 1); // All passed entries should belong to one region because it is coming from the EntryBuffers // split per region. But the regions might split and merge (unlike log recovery case). @@ -413,6 +565,13 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { // check whether the table is dropped or disabled which might cause // SocketTimeoutException, or RetriesExhaustedException or similar if we get IOE. 
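          // Caching the table in disabledAndDroppedTables means the check at the top of this
          // append path will skip subsequent batches for the table without another location lookup.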
if (cause instanceof TableNotFoundException || connection.isTableDisabled(tableName)) { + if (LOG.isTraceEnabled()) { + LOG.trace("Skipping " + entries.size() + " entries in table " + tableName + + " because received exception for dropped or disabled table", cause); + for (Entry entry : entries) { + LOG.trace("Skipping : " + entry); + } + } disabledAndDroppedTables.put(tableName, Boolean.TRUE); // put to cache for later. if (!tasksCancelled) { sink.getSkippedEditsCounter().addAndGet(entries.size()); @@ -452,50 +611,21 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { */ static class RegionReplicaReplayCallable extends RegionAdminServiceCallable { - // replicaId of the region replica that we want to replicate to - private final int replicaId; private final List entries; private final byte[] initialEncodedRegionName; private final AtomicLong skippedEntries; - private final RpcControllerFactory rpcControllerFactory; - private boolean skip; public RegionReplicaReplayCallable(ClusterConnection connection, RpcControllerFactory rpcControllerFactory, TableName tableName, HRegionLocation location, HRegionInfo regionInfo, byte[] row,List entries, AtomicLong skippedEntries) { - super(connection, location, tableName, row); - this.replicaId = regionInfo.getReplicaId(); + super(connection, rpcControllerFactory, location, tableName, row, regionInfo.getReplicaId()); this.entries = entries; - this.rpcControllerFactory = rpcControllerFactory; this.skippedEntries = skippedEntries; this.initialEncodedRegionName = regionInfo.getEncodedNameAsBytes(); } - @Override - public HRegionLocation getLocation(boolean useCache) throws IOException { - RegionLocations rl = getRegionLocations(connection, tableName, row, useCache, replicaId); - if (rl == null) { - throw new HBaseIOException(getExceptionMessage()); - } - location = rl.getRegionLocation(replicaId); - if (location == null) { - throw new HBaseIOException(getExceptionMessage()); - } - - // check whether we should still replay this entry. If the regions are changed, or the - // entry is not coming form the primary region, filter it out because we do not need it. - // Regions can change because of (1) region split (2) region merge (3) table recreated - if (!Bytes.equals(location.getRegionInfo().getEncodedNameAsBytes(), - initialEncodedRegionName)) { - skip = true; - return null; - } - - return location; - } - @Override public ReplicateWALEntryResponse call(int timeout) throws IOException { return replayToServer(this.entries, timeout); @@ -503,55 +633,46 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { private ReplicateWALEntryResponse replayToServer(List entries, int timeout) throws IOException { - if (entries.isEmpty() || skip) { - skippedEntries.incrementAndGet(); - return ReplicateWALEntryResponse.newBuilder().build(); + // check whether we should still replay this entry. If the regions are changed, or the + // entry is not coming form the primary region, filter it out because we do not need it. 
+ // Regions can change because of (1) region split (2) region merge (3) table recreated + boolean skip = false; + + if (!Bytes.equals(location.getRegionInfo().getEncodedNameAsBytes(), + initialEncodedRegionName)) { + skip = true; + } + if (!entries.isEmpty() && !skip) { + Entry[] entriesArray = new Entry[entries.size()]; + entriesArray = entries.toArray(entriesArray); + + // set the region name for the target region replica + Pair p = + ReplicationProtbufUtil.buildReplicateWALEntryRequest( + entriesArray, location.getRegionInfo().getEncodedNameAsBytes()); + try { + PayloadCarryingRpcController controller = rpcControllerFactory.newController(p.getSecond()); + controller.setCallTimeout(timeout); + controller.setPriority(tableName); + return stub.replay(controller, p.getFirst()); + } catch (ServiceException se) { + throw ProtobufUtil.getRemoteException(se); + } } - Entry[] entriesArray = new Entry[entries.size()]; - entriesArray = entries.toArray(entriesArray); - - // set the region name for the target region replica - Pair p = - ReplicationProtbufUtil.buildReplicateWALEntryRequest( - entriesArray, location.getRegionInfo().getEncodedNameAsBytes()); - try { - PayloadCarryingRpcController controller = rpcControllerFactory.newController(p.getSecond()); - controller.setCallTimeout(timeout); - controller.setPriority(tableName); - return stub.replay(controller, p.getFirst()); - } catch (ServiceException se) { - throw ProtobufUtil.getRemoteException(se); + if (skip) { + if (LOG.isTraceEnabled()) { + LOG.trace("Skipping " + entries.size() + " entries in table " + tableName + + " because located region " + location.getRegionInfo().getEncodedName() + + " is different than the original region " + + Bytes.toStringBinary(initialEncodedRegionName) + " from WALEdit"); + for (Entry entry : entries) { + LOG.trace("Skipping : " + entry); + } + } + skippedEntries.addAndGet(entries.size()); } + return ReplicateWALEntryResponse.newBuilder().build(); } - - @Override - protected String getExceptionMessage() { - return super.getExceptionMessage() + " table=" + tableName - + " ,replica=" + replicaId + ", row=" + Bytes.toStringBinary(row); - } - } - - private static RegionLocations getRegionLocations( - ClusterConnection connection, TableName tableName, byte[] row, - boolean useCache, int replicaId) - throws RetriesExhaustedException, DoNotRetryIOException, InterruptedIOException { - RegionLocations rl; - try { - rl = connection.locateRegion(tableName, row, useCache, true, replicaId); - } catch (DoNotRetryIOException e) { - throw e; - } catch (RetriesExhaustedException e) { - throw e; - } catch (InterruptedIOException e) { - throw e; - } catch (IOException e) { - throw new RetriesExhaustedException("Can't get the location", e); - } - if (rl == null) { - throw new RetriesExhaustedException("Can't get the locations"); - } - - return rl; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index b30698caf78..5b0f469a04a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -23,6 +23,7 @@ import static org.apache.hadoop.hbase.HConstants.REPLICATION_ENABLE_KEY; import static org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_LOCAL; import java.io.IOException; +import java.util.ArrayList; import java.util.List; import 
java.util.NavigableMap; import java.util.TreeMap; @@ -65,7 +66,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; * Gateway to Replication. Used by {@link org.apache.hadoop.hbase.regionserver.HRegionServer}. */ @InterfaceAudience.Private -public class Replication extends WALActionsListener.Base implements +public class Replication extends WALActionsListener.Base implements ReplicationSourceService, ReplicationSinkService { private static final Log LOG = LogFactory.getLog(Replication.class); @@ -81,6 +82,8 @@ public class Replication extends WALActionsListener.Base implements /** Statistics thread schedule pool */ private ScheduledExecutorService scheduleThreadPool; private int statsThreadPeriod; + // ReplicationLoad to access replication metrics + private ReplicationLoad replicationLoad; /** * Instantiate the replication management (if rep is enabled). @@ -137,11 +140,13 @@ public class Replication extends WALActionsListener.Base implements this.statsThreadPeriod = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); LOG.debug("ReplicationStatisticsThread " + this.statsThreadPeriod); + this.replicationLoad = new ReplicationLoad(); } else { this.replicationManager = null; this.replicationQueues = null; this.replicationPeers = null; this.replicationTracker = null; + this.replicationLoad = null; } } @@ -309,4 +314,29 @@ public class Replication extends WALActionsListener.Base implements } } } + + @Override + public ReplicationLoad refreshAndGetReplicationLoad() { + if (this.replicationLoad == null) { + return null; + } + // always build for latest data + buildReplicationLoad(); + return this.replicationLoad; + } + + private void buildReplicationLoad() { + // get source + List sources = this.replicationManager.getSources(); + List sourceMetricsList = new ArrayList(); + + for (ReplicationSourceInterface source : sources) { + if (source instanceof ReplicationSource) { + sourceMetricsList.add(((ReplicationSource) source).getSourceMetrics()); + } + } + // get sink + MetricsSink sinkMetrics = this.replicationSink.getSinkMetrics(); + this.replicationLoad.buildReplicationLoad(sourceMetricsList, sinkMetrics); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java new file mode 100644 index 00000000000..b3f3ecbcc0e --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationLoad.java @@ -0,0 +1,151 @@ +/** + * Copyright 2014 The Apache Software Foundation Licensed to the Apache Software Foundation (ASF) + * under one or more contributor license agreements. See the NOTICE file distributed with this work + * for additional information regarding copyright ownership. The ASF licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in + * writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific + * language governing permissions and limitations under the License. 
+ */ +package org.apache.hadoop.hbase.replication.regionserver; + +import java.util.Date; +import java.util.List; +import java.util.ArrayList; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Strings; + +/** + * This class is used for exporting some of the info from replication metrics + */ +@InterfaceAudience.Private +public class ReplicationLoad { + + // Empty load instance. + public static final ReplicationLoad EMPTY_REPLICATIONLOAD = new ReplicationLoad(); + + private List sourceMetricsList; + private MetricsSink sinkMetrics; + + private List replicationLoadSourceList; + private ClusterStatusProtos.ReplicationLoadSink replicationLoadSink; + + /** default constructor */ + public ReplicationLoad() { + super(); + } + + /** + * buildReplicationLoad + * @param srMetricsList + * @param skMetrics + */ + + public void buildReplicationLoad(final List srMetricsList, + final MetricsSink skMetrics) { + this.sourceMetricsList = srMetricsList; + this.sinkMetrics = skMetrics; + + // build the SinkLoad + ClusterStatusProtos.ReplicationLoadSink.Builder rLoadSinkBuild = + ClusterStatusProtos.ReplicationLoadSink.newBuilder(); + rLoadSinkBuild.setAgeOfLastAppliedOp(sinkMetrics.getAgeOfLastAppliedOp()); + rLoadSinkBuild.setTimeStampsOfLastAppliedOp(sinkMetrics.getTimeStampOfLastAppliedOp()); + this.replicationLoadSink = rLoadSinkBuild.build(); + + // build the SourceLoad List + this.replicationLoadSourceList = new ArrayList(); + for (MetricsSource sm : this.sourceMetricsList) { + long ageOfLastShippedOp = sm.getAgeOfLastShippedOp(); + int sizeOfLogQueue = sm.getSizeOfLogQueue(); + long timeStampOfLastShippedOp = sm.getTimeStampOfLastShippedOp(); + long replicationLag; + long timePassedAfterLastShippedOp = + EnvironmentEdgeManager.currentTime() - timeStampOfLastShippedOp; + if (sizeOfLogQueue != 0) { + // err on the large side + replicationLag = Math.max(ageOfLastShippedOp, timePassedAfterLastShippedOp); + } else if (timePassedAfterLastShippedOp < 2 * ageOfLastShippedOp) { + replicationLag = ageOfLastShippedOp; // last shipped happen recently + } else { + // last shipped may happen last night, + // so NO real lag although ageOfLastShippedOp is non-zero + replicationLag = 0; + } + + ClusterStatusProtos.ReplicationLoadSource.Builder rLoadSourceBuild = + ClusterStatusProtos.ReplicationLoadSource.newBuilder(); + rLoadSourceBuild.setPeerID(sm.getPeerID()); + rLoadSourceBuild.setAgeOfLastShippedOp(ageOfLastShippedOp); + rLoadSourceBuild.setSizeOfLogQueue(sizeOfLogQueue); + rLoadSourceBuild.setTimeStampOfLastShippedOp(timeStampOfLastShippedOp); + rLoadSourceBuild.setReplicationLag(replicationLag); + + this.replicationLoadSourceList.add(rLoadSourceBuild.build()); + } + + } + + /** + * sourceToString + * @return a string contains sourceReplicationLoad information + */ + public String sourceToString() { + if (this.sourceMetricsList == null) return null; + + StringBuilder sb = new StringBuilder(); + + for (ClusterStatusProtos.ReplicationLoadSource rls : this.replicationLoadSourceList) { + + sb = Strings.appendKeyValue(sb, "\n PeerID", rls.getPeerID()); + sb = Strings.appendKeyValue(sb, "AgeOfLastShippedOp", rls.getAgeOfLastShippedOp()); + sb = Strings.appendKeyValue(sb, "SizeOfLogQueue", rls.getSizeOfLogQueue()); + sb = + Strings.appendKeyValue(sb, "TimeStampsOfLastShippedOp", + (new 
Date(rls.getTimeStampOfLastShippedOp()).toString())); + sb = Strings.appendKeyValue(sb, "Replication Lag", rls.getReplicationLag()); + } + + return sb.toString(); + } + + /** + * sinkToString + * @return a string contains sinkReplicationLoad information + */ + public String sinkToString() { + if (this.replicationLoadSink == null) return null; + + StringBuilder sb = new StringBuilder(); + sb = + Strings.appendKeyValue(sb, "AgeOfLastAppliedOp", + this.replicationLoadSink.getAgeOfLastAppliedOp()); + sb = + Strings.appendKeyValue(sb, "TimeStampsOfLastAppliedOp", + (new Date(this.replicationLoadSink.getTimeStampsOfLastAppliedOp()).toString())); + + return sb.toString(); + } + + public ClusterStatusProtos.ReplicationLoadSink getReplicationLoadSink() { + return this.replicationLoadSink; + } + + public List getReplicationLoadSourceList() { + return this.replicationLoadSourceList; + } + + /** + * @see java.lang.Object#toString() + */ + @Override + public String toString() { + return this.sourceToString() + System.getProperty("line.separator") + this.sinkToString(); + } + +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java index 9a6013188da..32764180aa4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java @@ -254,4 +254,12 @@ public class ReplicationSink { "age in ms of last applied edit: " + this.metrics.refreshAgeOfLastAppliedOp() + ", total replicated edits: " + this.totalReplicatedEdits; } + + /** + * Get replication Sink Metrics + * @return MetricsSink + */ + public MetricsSink getSinkMetrics() { + return this.metrics; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index ee43956f482..794a3e155c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -691,8 +691,10 @@ public class ReplicationSource extends Thread } replicateContext.setEntries(entries).setSize(currentSize); + long startTimeNs = System.nanoTime(); // send the edits to the endpoint. 
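The replication lag heuristic introduced in buildReplicationLoad() above is worth restating on its own: with a non-empty log queue it errs on the large side, with an empty queue it only trusts the recorded age if the last shipment was recent. The following is a self-contained restatement under those assumptions; the method and parameter names are illustrative, and the patch itself reads the inputs from MetricsSource and EnvironmentEdgeManager rather than the wall clock used here.

// Standalone restatement of the lag heuristic in buildReplicationLoad().
final class ReplicationLagEstimator {
    static long estimateLag(long ageOfLastShippedOpMs, int sizeOfLogQueue,
            long timeStampOfLastShippedOpMs, long nowMs) {
        long timePassed = nowMs - timeStampOfLastShippedOpMs;
        if (sizeOfLogQueue != 0) {
            // WALs are still queued: err on the large side.
            return Math.max(ageOfLastShippedOpMs, timePassed);
        }
        if (timePassed < 2 * ageOfLastShippedOpMs) {
            // The last shipment happened recently, so the recorded age is still meaningful.
            return ageOfLastShippedOpMs;
        }
        // Nothing queued and the last shipment is old: effectively no lag.
        return 0;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        // Queue not empty: lag is the larger of the two measures.
        System.out.println(estimateLag(5_000, 3, now - 20_000, now)); // 20000
        // Empty queue, shipped recently: report the recorded age.
        System.out.println(estimateLag(5_000, 0, now - 6_000, now));  // 5000
        // Empty queue, last shipment long ago: treat as caught up.
        System.out.println(estimateLag(5_000, 0, now - 60_000, now)); // 0
    }
}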
Will block until the edits are shipped and acknowledged boolean replicated = replicationEndpoint.replicate(replicateContext); + long endTimeNs = System.nanoTime(); if (!replicated) { continue; @@ -713,7 +715,8 @@ public class ReplicationSource extends Thread this.metrics.setAgeOfLastShippedOp(entries.get(entries.size()-1).getKey().getWriteTime()); if (LOG.isTraceEnabled()) { LOG.trace("Replicated " + this.totalReplicatedEdits + " entries in total, or " - + this.totalReplicatedOperations + " operations"); + + this.totalReplicatedOperations + " operations in " + + ((endTimeNs - startTimeNs)/1000000) + " ms"); } break; } catch (Exception ex) { @@ -869,4 +872,12 @@ public class ReplicationSource extends Thread ", currently replicating from: " + this.currentPath + " at position: " + position; } + + /** + * Get Replication Source Metrics + * @return sourceMetrics + */ + public MetricsSource getSourceMetrics() { + return this.metrics; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java index 4908ebc7c9a..4d9725702f9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java @@ -43,6 +43,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost; @@ -200,7 +201,7 @@ public class ReplicationSourceManager implements ReplicationListener { } } } - + private void cleanOldLogs(SortedSet wals, String key, String id) { SortedSet walSet = wals.headSet(key); LOG.debug("Removing " + walSet.size() + " logs in the list: " + walSet); @@ -302,7 +303,7 @@ public class ReplicationSourceManager implements ReplicationListener { protected Map> getWALs() { return Collections.unmodifiableMap(walsById); } - + /** * Get a copy of the wals of the recovered sources on this rs * @return a sorted set of wal names @@ -375,8 +376,10 @@ public class ReplicationSourceManager implements ReplicationListener { final ReplicationPeerConfig peerConfig, final ReplicationPeer replicationPeer) throws IOException { RegionServerCoprocessorHost rsServerHost = null; + TableDescriptors tableDescriptors = null; if (server instanceof HRegionServer) { rsServerHost = ((HRegionServer) server).getRegionServerCoprocessorHost(); + tableDescriptors = ((HRegionServer) server).getTableDescriptors(); } ReplicationSourceInterface src; try { @@ -420,7 +423,7 @@ public class ReplicationSourceManager implements ReplicationListener { // init replication endpoint replicationEndpoint.init(new ReplicationEndpoint.Context(replicationPeer.getConfiguration(), - fs, peerConfig, peerId, clusterId, replicationPeer, metrics)); + fs, peerConfig, peerId, clusterId, replicationPeer, metrics, tableDescriptors)); return src; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java index a2648e9f680..fafc5a5b0d6 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java @@ -61,8 +61,8 @@ import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; @@ -327,7 +327,7 @@ public class AccessControlLists { * Returns {@code true} if the given region is part of the {@code _acl_} * metadata table. */ - static boolean isAclRegion(HRegion region) { + static boolean isAclRegion(Region region) { return ACL_TABLE_NAME.equals(region.getTableDesc().getTableName()); } @@ -346,8 +346,7 @@ public class AccessControlLists { * @return a map of the permissions for this table. * @throws IOException */ - static Map> loadAll( - HRegion aclRegion) + static Map> loadAll(Region aclRegion) throws IOException { if (!isAclRegion(aclRegion)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java index ca1fba893df..03b5e39508e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java @@ -75,7 +75,7 @@ import org.apache.hadoop.hbase.filter.CompareFilter; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterList; import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.ipc.RequestContext; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.ResponseConverter; @@ -86,11 +86,12 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptio import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadRequest; import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.ScanType; +import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; @@ -164,11 +165,11 @@ public class AccessController extends BaseMasterAndRegionObserver TableAuthManager authManager = null; - // flags if we are running on a region of the _acl_ table + /** flags if we are running on a region of the _acl_ table */ boolean aclRegion = false; - // defined only for Endpoint implementation, so it can have way to - // access region services. 
+ /** defined only for Endpoint implementation, so it can have way to + access region services */ private RegionCoprocessorEnvironment regionEnv; /** Mapping of scanner instances to the user who created them */ @@ -177,28 +178,33 @@ public class AccessController extends BaseMasterAndRegionObserver private Map> tableAcls; - // Provider for mapping principal names to Users + /** Provider for mapping principal names to Users */ private UserProvider userProvider; - // The list of users with superuser authority + /** The list of users with superuser authority */ private List superusers; - // if we are able to support cell ACLs + /** if we are active, usually true, only not true if "hbase.security.authorization" + has been set to false in site configuration */ + boolean authorizationEnabled; + + /** if we are able to support cell ACLs */ boolean cellFeaturesEnabled; - // if we should check EXEC permissions + /** if we should check EXEC permissions */ boolean shouldCheckExecPermission; - // if we should terminate access checks early as soon as table or CF grants - // allow access; pre-0.98 compatible behavior + /** if we should terminate access checks early as soon as table or CF grants + allow access; pre-0.98 compatible behavior */ boolean compatibleEarlyTermination; + /** if we have been successfully initialized */ private volatile boolean initialized = false; - // This boolean having relevance only in the Master. + /** if the ACL table is available, only relevant in the master */ private volatile boolean aclTabAvailable = false; - public HRegion getRegion() { + public Region getRegion() { return regionEnv != null ? regionEnv.getRegion() : null; } @@ -207,7 +213,7 @@ public class AccessController extends BaseMasterAndRegionObserver } void initialize(RegionCoprocessorEnvironment e) throws IOException { - final HRegion region = e.getRegion(); + final Region region = e.getRegion(); Configuration conf = e.getConfiguration(); Map> tables = AccessControlLists.loadAll(region); @@ -371,11 +377,7 @@ public class AccessController extends BaseMasterAndRegionObserver private void logResult(AuthResult result) { if (AUDITLOG.isTraceEnabled()) { - RequestContext ctx = RequestContext.get(); - InetAddress remoteAddr = null; - if (ctx != null) { - remoteAddr = ctx.getRemoteAddress(); - } + InetAddress remoteAddr = RpcServer.getRemoteAddress(); AUDITLOG.trace("Access " + (result.isAllowed() ? "allowed" : "denied") + " for user " + (result.getUser() != null ? result.getUser().getShortName() : "UNKNOWN") + "; reason: " + result.getReason() + @@ -391,8 +393,8 @@ public class AccessController extends BaseMasterAndRegionObserver * otherwise the currently logged in user is used. */ private User getActiveUser() throws IOException { - User user = RequestContext.getRequestUser(); - if (!RequestContext.isInRequestContext()) { + User user = RpcServer.getRequestUser(); + if (user == null) { // for non-rpc handling, fallback to system user user = userProvider.getCurrent(); } @@ -408,8 +410,8 @@ public class AccessController extends BaseMasterAndRegionObserver * @throws IOException if obtaining the current user fails * @throws AccessDeniedException if user has no authorization */ - private void requirePermission(String request, TableName tableName, byte[] family, byte[] qualifier, - Action... permissions) throws IOException { + private void requirePermission(String request, TableName tableName, byte[] family, + byte[] qualifier, Action... 
permissions) throws IOException { User user = getActiveUser(); AuthResult result = null; @@ -425,7 +427,40 @@ public class AccessController extends BaseMasterAndRegionObserver } } logResult(result); - if (!result.isAllowed()) { + if (authorizationEnabled && !result.isAllowed()) { + throw new AccessDeniedException("Insufficient permissions " + result.toContextString()); + } + } + + /** + * Authorizes that the current user has any of the given permissions for the + * given table, column family and column qualifier. + * @param tableName Table requested + * @param family Column family param + * @param qualifier Column qualifier param + * @throws IOException if obtaining the current user fails + * @throws AccessDeniedException if user has no authorization + */ + private void requireTablePermission(String request, TableName tableName, byte[] family, + byte[] qualifier, Action... permissions) throws IOException { + User user = getActiveUser(); + AuthResult result = null; + + for (Action permission : permissions) { + if (authManager.authorize(user, tableName, null, null, permission)) { + result = AuthResult.allow(request, "Table permission granted", user, + permission, tableName, null, null); + result.getParams().setFamily(family).setQualifier(qualifier); + break; + } else { + // rest of the world + result = AuthResult.deny(request, "Insufficient permissions", user, + permission, tableName, family, qualifier); + result.getParams().setFamily(family).setQualifier(qualifier); + } + } + logResult(result); + if (authorizationEnabled && !result.isAllowed()) { throw new AccessDeniedException("Insufficient permissions " + result.toContextString()); } } @@ -455,7 +490,7 @@ public class AccessController extends BaseMasterAndRegionObserver } } logResult(result); - if (!result.isAllowed()) { + if (authorizationEnabled && !result.isAllowed()) { throw new AccessDeniedException("Insufficient permissions " + result.toContextString()); } } @@ -470,31 +505,6 @@ public class AccessController extends BaseMasterAndRegionObserver requireGlobalPermission(request, perm, null, null); } - /** - * Authorizes that the current user has permission to perform the given - * action on the set of table column families. - * @param perm Action that is required - * @param env The current coprocessor environment - * @param families The map of column families-qualifiers. - * @throws AccessDeniedException if the authorization check failed - */ - private void requirePermission(String request, Action perm, - RegionCoprocessorEnvironment env, - Map> families) - throws IOException { - User user = getActiveUser(); - AuthResult result = permissionGranted(request, user, perm, env, families); - logResult(result); - - if (!result.isAllowed()) { - throw new AccessDeniedException("Insufficient permissions (table=" + - env.getRegion().getTableDesc().getTableName()+ - ((families != null && families.size() > 0) ? ", family: " + - result.toFamilyString() : "") + ", action=" + - perm.toString() + ")"); - } - } - /** * Checks that the user has the given global permission. 
The generated * audit log message will contain context information for the operation @@ -506,13 +516,20 @@ public class AccessController extends BaseMasterAndRegionObserver private void requireGlobalPermission(String request, Action perm, TableName tableName, Map> familyMap) throws IOException { User user = getActiveUser(); + AuthResult result = null; if (authManager.authorize(user, perm)) { - logResult(AuthResult.allow(request, "Global check allowed", user, perm, tableName, familyMap)); + result = AuthResult.allow(request, "Global check allowed", user, perm, tableName, familyMap); + result.getParams().setTableName(tableName).setFamilies(familyMap); + logResult(result); } else { - logResult(AuthResult.deny(request, "Global check failed", user, perm, tableName, familyMap)); - throw new AccessDeniedException("Insufficient permissions for user '" + + result = AuthResult.deny(request, "Global check failed", user, perm, tableName, familyMap); + result.getParams().setTableName(tableName).setFamilies(familyMap); + logResult(result); + if (authorizationEnabled) { + throw new AccessDeniedException("Insufficient permissions for user '" + (user != null ? user.getShortName() : "null") +"' (global, action=" + perm.toString() + ")"); + } } } @@ -526,13 +543,20 @@ public class AccessController extends BaseMasterAndRegionObserver private void requireGlobalPermission(String request, Action perm, String namespace) throws IOException { User user = getActiveUser(); + AuthResult authResult = null; if (authManager.authorize(user, perm)) { - logResult(AuthResult.allow(request, "Global check allowed", user, perm, namespace)); + authResult = AuthResult.allow(request, "Global check allowed", user, perm, null); + authResult.getParams().setNamespace(namespace); + logResult(authResult); } else { - logResult(AuthResult.deny(request, "Global check failed", user, perm, namespace)); - throw new AccessDeniedException("Insufficient permissions for user '" + + authResult = AuthResult.deny(request, "Global check failed", user, perm, null); + authResult.getParams().setNamespace(namespace); + logResult(authResult); + if (authorizationEnabled) { + throw new AccessDeniedException("Insufficient permissions for user '" + (user != null ? user.getShortName() : "null") +"' (global, action=" + perm.toString() + ")"); + } } } @@ -558,7 +582,38 @@ public class AccessController extends BaseMasterAndRegionObserver } } logResult(result); - if (!result.isAllowed()) { + if (authorizationEnabled && !result.isAllowed()) { + throw new AccessDeniedException("Insufficient permissions " + + result.toContextString()); + } + } + + /** + * Checks that the user has the given global or namespace permission. + * @param namespace + * @param permissions Actions being requested + */ + public void requireNamespacePermission(String request, String namespace, TableName tableName, + Map> familyMap, Action... 
permissions) + throws IOException { + User user = getActiveUser(); + AuthResult result = null; + + for (Action permission : permissions) { + if (authManager.authorize(user, namespace, permission)) { + result = AuthResult.allow(request, "Namespace permission granted", + user, permission, namespace); + result.getParams().setTableName(tableName).setFamilies(familyMap); + break; + } else { + // rest of the world + result = AuthResult.deny(request, "Insufficient permissions", user, + permission, namespace); + result.getParams().setTableName(tableName).setFamilies(familyMap); + } + } + logResult(result); + if (authorizationEnabled && !result.isAllowed()) { throw new AccessDeniedException("Insufficient permissions " + result.toContextString()); } @@ -693,6 +748,8 @@ public class AccessController extends BaseMasterAndRegionObserver } } } + } else if (entry.getValue() == null) { + get.addFamily(col); } else { throw new RuntimeException("Unhandled collection type " + entry.getValue().getClass().getName()); @@ -736,10 +793,12 @@ public class AccessController extends BaseMasterAndRegionObserver boolean foundColumn = false; try { boolean more = false; + ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(1).build(); + do { cells.clear(); // scan with limit as 1 to hold down memory use on wide rows - more = scanner.next(cells, 1); + more = scanner.next(cells, scannerContext); for (Cell cell: cells) { if (LOG.isTraceEnabled()) { LOG.trace("Found cell " + cell); @@ -826,8 +885,14 @@ public class AccessController extends BaseMasterAndRegionObserver // Checks whether incoming cells contain any tag with type as ACL_TAG_TYPE. This tag // type is reserved and should not be explicitly set by user. private void checkForReservedTagPresence(User user, Mutation m) throws IOException { + // No need to check if we're not going to throw + if (!authorizationEnabled) { + m.setAttribute(TAG_CHECK_PASSED, TRUE); + return; + } // Superusers are allowed to store cells unconditionally. 
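The early return added to checkForReservedTagPresence() above short-circuits the per-cell tag scan whenever no exception would be thrown anyway (enforcement disabled, or the caller is a superuser), marking the mutation as already checked so later hooks skip it too. A minimal sketch of that gate, with hypothetical names standing in for the HBase Mutation and User types:

import java.util.Map;
import java.util.Set;

// Sketch of the early-exit pattern: mark the mutation as checked and skip the
// (potentially expensive) tag scan when the scan could never lead to a rejection.
final class ReservedTagGate {
    static final String TAG_CHECK_PASSED = "acl.tag.check.passed";

    static boolean needsTagScan(boolean authorizationEnabled, String userShortName,
            Set<String> superusers, Map<String, String> mutationAttributes) {
        if (!authorizationEnabled) {
            // No need to check if we are never going to throw.
            mutationAttributes.put(TAG_CHECK_PASSED, "true");
            return false;
        }
        if (superusers.contains(userShortName)) {
            // Superusers may store cells unconditionally.
            mutationAttributes.put(TAG_CHECK_PASSED, "true");
            return false;
        }
        // Only scan if an earlier hook (e.g. prePut before preBatchMutate) has not
        // already validated this mutation.
        return !mutationAttributes.containsKey(TAG_CHECK_PASSED);
    }
}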
if (superusers.contains(user.getShortName())) { + m.setAttribute(TAG_CHECK_PASSED, TRUE); return; } // We already checked (prePut vs preBatchMutation) @@ -855,6 +920,11 @@ public class AccessController extends BaseMasterAndRegionObserver CompoundConfiguration conf = new CompoundConfiguration(); conf.add(env.getConfiguration()); + authorizationEnabled = conf.getBoolean(User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY, true); + if (!authorizationEnabled) { + LOG.warn("The AccessController has been loaded with authorization checks disabled."); + } + shouldCheckExecPermission = conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS); @@ -918,7 +988,8 @@ public class AccessController extends BaseMasterAndRegionObserver for (byte[] family: families) { familyMap.put(family, null); } - requireNamespacePermission("createTable", desc.getTableName().getNamespaceAsString(), Action.CREATE); + requireNamespacePermission("createTable", desc.getTableName().getNamespaceAsString(), + desc.getTableName(), familyMap, Action.CREATE); } @Override @@ -990,6 +1061,7 @@ public class AccessController extends BaseMasterAndRegionObserver public void preTruncateTable(ObserverContext c, final TableName tableName) throws IOException { requirePermission("truncateTable", tableName, null, null, Action.ADMIN, Action.CREATE); + final Configuration conf = c.getEnvironment().getConfiguration(); User.runAsLoginUser(new PrivilegedExceptionAction() { @Override @@ -1049,7 +1121,8 @@ public class AccessController extends BaseMasterAndRegionObserver @Override public void preAddColumn(ObserverContext c, TableName tableName, HColumnDescriptor column) throws IOException { - requirePermission("addColumn", tableName, null, null, Action.ADMIN, Action.CREATE); + requireTablePermission("addColumn", tableName, column.getName(), null, Action.ADMIN, + Action.CREATE); } @Override @@ -1088,8 +1161,12 @@ public class AccessController extends BaseMasterAndRegionObserver public void preDisableTable(ObserverContext c, TableName tableName) throws IOException { if (Bytes.equals(tableName.getName(), AccessControlLists.ACL_GLOBAL_NAME)) { + // We have to unconditionally disallow disable of the ACL table when we are installed, + // even if not enforcing authorizations. We are still allowing grants and revocations, + // checking permissions and logging audit messages, etc. If the ACL table is not + // available we will fail random actions all over the place. 
throw new AccessDeniedException("Not allowed to disable " - + AccessControlLists.ACL_TABLE_NAME + " table."); + + AccessControlLists.ACL_TABLE_NAME + " table with AccessController installed"); } requirePermission("disableTable", tableName, null, null, Action.ADMIN, Action.CREATE); } @@ -1168,6 +1245,7 @@ public class AccessController extends BaseMasterAndRegionObserver final SnapshotDescription snapshot) throws IOException { if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, getActiveUser())) { // list it, if user is the owner of snapshot + // TODO: We are not logging this for audit } else { requirePermission("listSnapshot", Action.ADMIN); } @@ -1197,6 +1275,7 @@ public class AccessController extends BaseMasterAndRegionObserver final SnapshotDescription snapshot) throws IOException { if (SnapshotDescriptionUtils.isSnapshotOwner(snapshot, getActiveUser())) { // Snapshot owner is allowed to delete the snapshot + // TODO: We are not logging this for audit } else { requirePermission("deleteSnapshot", Action.ADMIN); } @@ -1225,7 +1304,8 @@ public class AccessController extends BaseMasterAndRegionObserver return null; } }); - LOG.info(namespace + "entry deleted in " + AccessControlLists.ACL_TABLE_NAME + " table."); + this.authManager.getZKPermissionWatcher().deleteNamespaceACLNode(namespace); + LOG.info(namespace + " entry deleted in " + AccessControlLists.ACL_TABLE_NAME + " table."); } @Override @@ -1270,7 +1350,7 @@ public class AccessController extends BaseMasterAndRegionObserver public void preOpen(ObserverContext e) throws IOException { RegionCoprocessorEnvironment env = e.getEnvironment(); - final HRegion region = env.getRegion(); + final Region region = env.getRegion(); if (region == null) { LOG.error("NULL region from RegionCoprocessorEnvironment in preOpen()"); } else { @@ -1286,7 +1366,7 @@ public class AccessController extends BaseMasterAndRegionObserver @Override public void postOpen(ObserverContext c) { RegionCoprocessorEnvironment env = c.getEnvironment(); - final HRegion region = env.getRegion(); + final Region region = env.getRegion(); if (region == null) { LOG.error("NULL region from RegionCoprocessorEnvironment in postOpen()"); return; @@ -1363,8 +1443,9 @@ public class AccessController extends BaseMasterAndRegionObserver authResult.setReason("Covering cell set"); } logResult(authResult); - if (!authResult.isAllowed()) { - throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); + if (authorizationEnabled && !authResult.isAllowed()) { + throw new AccessDeniedException("Insufficient permissions " + + authResult.toContextString()); } } @@ -1390,7 +1471,7 @@ public class AccessController extends BaseMasterAndRegionObserver throw new RuntimeException("Unhandled operation " + opType); } AuthResult authResult = permissionGranted(opType, user, env, families, Action.READ); - HRegion region = getRegion(env); + Region region = getRegion(env); TableName table = getTableName(region); Map cfVsMaxVersions = Maps.newHashMap(); for (HColumnDescriptor hcd : region.getTableDesc().getFamilies()) { @@ -1407,26 +1488,29 @@ public class AccessController extends BaseMasterAndRegionObserver // grants three times (permissionGranted above, here, and in the // filter) but that's the price of backwards compatibility. 
if (hasFamilyQualifierPermission(user, Action.READ, env, families)) { - Filter ourFilter = new AccessControlFilter(authManager, user, table, - AccessControlFilter.Strategy.CHECK_TABLE_AND_CF_ONLY, - cfVsMaxVersions); - // wrap any existing filter - if (filter != null) { - ourFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL, - Lists.newArrayList(ourFilter, filter)); - } authResult.setAllowed(true); authResult.setReason("Access allowed with filter"); - switch (opType) { - case GET: - case EXISTS: - ((Get)query).setFilter(ourFilter); - break; - case SCAN: - ((Scan)query).setFilter(ourFilter); - break; - default: - throw new RuntimeException("Unhandled operation " + opType); + // Only wrap the filter if we are enforcing authorizations + if (authorizationEnabled) { + Filter ourFilter = new AccessControlFilter(authManager, user, table, + AccessControlFilter.Strategy.CHECK_TABLE_AND_CF_ONLY, + cfVsMaxVersions); + // wrap any existing filter + if (filter != null) { + ourFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL, + Lists.newArrayList(ourFilter, filter)); + } + switch (opType) { + case GET: + case EXISTS: + ((Get)query).setFilter(ourFilter); + break; + case SCAN: + ((Scan)query).setFilter(ourFilter); + break; + default: + throw new RuntimeException("Unhandled operation " + opType); + } } } } else { @@ -1434,33 +1518,37 @@ public class AccessController extends BaseMasterAndRegionObserver // than whole table or CF. Simply inject a filter and return what is // allowed. We will not throw an AccessDeniedException. This is a // behavioral change since 0.96. - Filter ourFilter = new AccessControlFilter(authManager, user, table, - AccessControlFilter.Strategy.CHECK_CELL_DEFAULT, cfVsMaxVersions); - // wrap any existing filter - if (filter != null) { - ourFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL, - Lists.newArrayList(ourFilter, filter)); - } authResult.setAllowed(true); authResult.setReason("Access allowed with filter"); - switch (opType) { - case GET: - case EXISTS: - ((Get)query).setFilter(ourFilter); - break; - case SCAN: - ((Scan)query).setFilter(ourFilter); - break; - default: - throw new RuntimeException("Unhandled operation " + opType); + // Only wrap the filter if we are enforcing authorizations + if (authorizationEnabled) { + Filter ourFilter = new AccessControlFilter(authManager, user, table, + AccessControlFilter.Strategy.CHECK_CELL_DEFAULT, cfVsMaxVersions); + // wrap any existing filter + if (filter != null) { + ourFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL, + Lists.newArrayList(ourFilter, filter)); + } + switch (opType) { + case GET: + case EXISTS: + ((Get)query).setFilter(ourFilter); + break; + case SCAN: + ((Scan)query).setFilter(ourFilter); + break; + default: + throw new RuntimeException("Unhandled operation " + opType); + } } } } logResult(authResult); - if (!authResult.isAllowed()) { - throw new AccessDeniedException("Insufficient permissions (table=" + table + - ", action=READ)"); + if (authorizationEnabled && !authResult.isAllowed()) { + throw new AccessDeniedException("Insufficient permissions for user '" + + (user != null ? 
user.getShortName() : "null") + + "' (table=" + table + ", action=READ)"); } } @@ -1481,14 +1569,15 @@ public class AccessController extends BaseMasterAndRegionObserver public void prePut(final ObserverContext c, final Put put, final WALEdit edit, final Durability durability) throws IOException { + User user = getActiveUser(); + checkForReservedTagPresence(user, put); + // Require WRITE permission to the table, CF, or top visible value, if any. // NOTE: We don't need to check the permissions for any earlier Puts // because we treat the ACLs in each Put as timestamped like any other // HBase value. A new ACL in a new Put applies to that Put. It doesn't // change the ACL of any previous Put. This allows simple evolution of // security policy over time without requiring expensive updates. - User user = getActiveUser(); - checkForReservedTagPresence(user, put); RegionCoprocessorEnvironment env = c.getEnvironment(); Map> families = put.getFamilyCellMap(); AuthResult authResult = permissionGranted(OpType.PUT, user, env, families, Action.WRITE); @@ -1496,10 +1585,11 @@ public class AccessController extends BaseMasterAndRegionObserver if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { put.setAttribute(CHECK_COVERING_PERM, TRUE); - } else { + } else if (authorizationEnabled) { throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } + // Add cell ACLs from the operation to the cells themselves byte[] bytes = put.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL); if (bytes != null) { @@ -1540,8 +1630,9 @@ public class AccessController extends BaseMasterAndRegionObserver if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { delete.setAttribute(CHECK_COVERING_PERM, TRUE); - } else { - throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); + } else if (authorizationEnabled) { + throw new AccessDeniedException("Insufficient permissions " + + authResult.toContextString()); } } } @@ -1564,18 +1655,18 @@ public class AccessController extends BaseMasterAndRegionObserver opType = OpType.DELETE; } AuthResult authResult = null; - if (checkCoveringPermission(opType, c.getEnvironment(), m.getRow(), m.getFamilyCellMap(), - m.getTimeStamp(), Action.WRITE)) { - authResult = AuthResult.allow(opType.toString(), "Covering cell set", getActiveUser(), - Action.WRITE, table, m.getFamilyCellMap()); + if (checkCoveringPermission(opType, c.getEnvironment(), m.getRow(), + m.getFamilyCellMap(), m.getTimeStamp(), Action.WRITE)) { + authResult = AuthResult.allow(opType.toString(), "Covering cell set", + getActiveUser(), Action.WRITE, table, m.getFamilyCellMap()); } else { - authResult = AuthResult.deny(opType.toString(), "Covering cell set", getActiveUser(), - Action.WRITE, table, m.getFamilyCellMap()); + authResult = AuthResult.deny(opType.toString(), "Covering cell set", + getActiveUser(), Action.WRITE, table, m.getFamilyCellMap()); } logResult(authResult); - if (!authResult.isAllowed()) { + if (authorizationEnabled && !authResult.isAllowed()) { throw new AccessDeniedException("Insufficient permissions " - + authResult.toContextString()); + + authResult.toContextString()); } } } @@ -1597,9 +1688,10 @@ public class AccessController extends BaseMasterAndRegionObserver final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put, final boolean result) throws IOException { - // Require READ and WRITE permissions on the table, CF, and KV to update 
User user = getActiveUser(); checkForReservedTagPresence(user, put); + + // Require READ and WRITE permissions on the table, CF, and KV to update RegionCoprocessorEnvironment env = c.getEnvironment(); Map> families = makeFamilyMap(family, qualifier); AuthResult authResult = permissionGranted(OpType.CHECK_AND_PUT, user, env, families, @@ -1608,10 +1700,12 @@ public class AccessController extends BaseMasterAndRegionObserver if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { put.setAttribute(CHECK_COVERING_PERM, TRUE); - } else { - throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); + } else if (authorizationEnabled) { + throw new AccessDeniedException("Insufficient permissions " + + authResult.toContextString()); } } + byte[] bytes = put.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL); if (bytes != null) { if (cellFeaturesEnabled) { @@ -1643,7 +1737,7 @@ public class AccessController extends BaseMasterAndRegionObserver getActiveUser(), Action.READ, table, families); } logResult(authResult); - if (!authResult.isAllowed()) { + if (authorizationEnabled && !authResult.isAllowed()) { throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } @@ -1672,8 +1766,9 @@ public class AccessController extends BaseMasterAndRegionObserver if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { delete.setAttribute(CHECK_COVERING_PERM, TRUE); - } else { - throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); + } else if (authorizationEnabled) { + throw new AccessDeniedException("Insufficient permissions " + + authResult.toContextString()); } } return result; @@ -1700,7 +1795,7 @@ public class AccessController extends BaseMasterAndRegionObserver getActiveUser(), Action.READ, table, families); } logResult(authResult); - if (!authResult.isAllowed()) { + if (authorizationEnabled && !authResult.isAllowed()) { throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } } @@ -1725,7 +1820,7 @@ public class AccessController extends BaseMasterAndRegionObserver authResult.setReason("Covering cell set"); } logResult(authResult); - if (!authResult.isAllowed()) { + if (authorizationEnabled && !authResult.isAllowed()) { throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); } return -1; @@ -1734,9 +1829,10 @@ public class AccessController extends BaseMasterAndRegionObserver @Override public Result preAppend(ObserverContext c, Append append) throws IOException { - // Require WRITE permission to the table, CF, and the KV to be appended User user = getActiveUser(); checkForReservedTagPresence(user, append); + + // Require WRITE permission to the table, CF, and the KV to be appended RegionCoprocessorEnvironment env = c.getEnvironment(); Map> families = append.getFamilyCellMap(); AuthResult authResult = permissionGranted(OpType.APPEND, user, env, families, Action.WRITE); @@ -1744,10 +1840,12 @@ public class AccessController extends BaseMasterAndRegionObserver if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { append.setAttribute(CHECK_COVERING_PERM, TRUE); - } else { - throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); + } else if (authorizationEnabled) { + throw new AccessDeniedException("Insufficient permissions " + + authResult.toContextString()); } } + byte[] bytes = 
append.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL); if (bytes != null) { if (cellFeaturesEnabled) { @@ -1756,6 +1854,7 @@ public class AccessController extends BaseMasterAndRegionObserver throw new DoNotRetryIOException("Cell ACLs cannot be persisted"); } } + return null; } @@ -1776,8 +1875,9 @@ public class AccessController extends BaseMasterAndRegionObserver getActiveUser(), Action.WRITE, table, append.getFamilyCellMap()); } logResult(authResult); - if (!authResult.isAllowed()) { - throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); + if (authorizationEnabled && !authResult.isAllowed()) { + throw new AccessDeniedException("Insufficient permissions " + + authResult.toContextString()); } } return null; @@ -1787,10 +1887,11 @@ public class AccessController extends BaseMasterAndRegionObserver public Result preIncrement(final ObserverContext c, final Increment increment) throws IOException { - // Require WRITE permission to the table, CF, and the KV to be replaced by - // the incremented value User user = getActiveUser(); checkForReservedTagPresence(user, increment); + + // Require WRITE permission to the table, CF, and the KV to be replaced by + // the incremented value RegionCoprocessorEnvironment env = c.getEnvironment(); Map> families = increment.getFamilyCellMap(); AuthResult authResult = permissionGranted(OpType.INCREMENT, user, env, families, @@ -1799,10 +1900,12 @@ public class AccessController extends BaseMasterAndRegionObserver if (!authResult.isAllowed()) { if (cellFeaturesEnabled && !compatibleEarlyTermination) { increment.setAttribute(CHECK_COVERING_PERM, TRUE); - } else { - throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); + } else if (authorizationEnabled) { + throw new AccessDeniedException("Insufficient permissions " + + authResult.toContextString()); } } + byte[] bytes = increment.getAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL); if (bytes != null) { if (cellFeaturesEnabled) { @@ -1811,6 +1914,7 @@ public class AccessController extends BaseMasterAndRegionObserver throw new DoNotRetryIOException("Cell ACLs cannot be persisted"); } } + return null; } @@ -1831,8 +1935,9 @@ public class AccessController extends BaseMasterAndRegionObserver getActiveUser(), Action.WRITE, table, increment.getFamilyCellMap()); } logResult(authResult); - if (!authResult.isAllowed()) { - throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString()); + if (authorizationEnabled && !authResult.isAllowed()) { + throw new AccessDeniedException("Insufficient permissions " + + authResult.toContextString()); } } return null; @@ -1914,7 +2019,8 @@ public class AccessController extends BaseMasterAndRegionObserver public RegionScanner postScannerOpen(final ObserverContext c, final Scan scan, final RegionScanner s) throws IOException { User user = getActiveUser(); - if (user != null && user.getShortName() != null) { // store reference to scanner owner for later checks + if (user != null && user.getShortName() != null) { + // store reference to scanner owner for later checks scannerOwners.put(s, user.getShortName()); } return s; @@ -1946,14 +2052,11 @@ public class AccessController extends BaseMasterAndRegionObserver * If so, we assume that access control is correctly enforced based on * the checks performed in preScannerOpen() */ - private void requireScannerOwner(InternalScanner s) - throws AccessDeniedException { - if (RequestContext.isInRequestContext()) { - String requestUserName = 
RequestContext.getRequestUserName(); - String owner = scannerOwners.get(s); - if (owner != null && !owner.equals(requestUserName)) { - throw new AccessDeniedException("User '"+ requestUserName +"' is not the scanner owner!"); - } + private void requireScannerOwner(InternalScanner s) throws AccessDeniedException { + String requestUserName = RpcServer.getRequestUserName(); + String owner = scannerOwners.get(s); + if (authorizationEnabled && owner != null && !owner.equals(requestUserName)) { + throw new AccessDeniedException("User '"+ requestUserName +"' is not the scanner owner!"); } } @@ -2046,11 +2149,11 @@ public class AccessController extends BaseMasterAndRegionObserver case Global : case Table : requirePermission("grant", perm.getTableName(), perm.getFamily(), - perm.getQualifier(), Action.ADMIN); + perm.getQualifier(), Action.ADMIN); break; case Namespace : requireGlobalPermission("grant", Action.ADMIN, perm.getNamespace()); - break; + break; } User.runAsLoginUser(new PrivilegedExceptionAction() { @@ -2097,7 +2200,7 @@ public class AccessController extends BaseMasterAndRegionObserver case Global : case Table : requirePermission("revoke", perm.getTableName(), perm.getFamily(), - perm.getQualifier(), Action.ADMIN); + perm.getQualifier(), Action.ADMIN); break; case Namespace : requireGlobalPermission("revoke", Action.ADMIN, perm.getNamespace()); @@ -2191,9 +2294,12 @@ public class AccessController extends BaseMasterAndRegionObserver } AccessControlProtos.CheckPermissionsResponse response = null; try { + User user = getActiveUser(); TableName tableName = regionEnv.getRegion().getTableDesc().getTableName(); for (Permission permission : permissions) { if (permission instanceof TablePermission) { + // Check table permissions + TablePermission tperm = (TablePermission) permission; for (Action action : permission.getActions()) { if (!tperm.getTableName().equals(tableName)) { @@ -2203,7 +2309,8 @@ public class AccessController extends BaseMasterAndRegionObserver tperm.getTableName())); } - Map> familyMap = new TreeMap>(Bytes.BYTES_COMPARATOR); + Map> familyMap = + new TreeMap>(Bytes.BYTES_COMPARATOR); if (tperm.getFamily() != null) { if (tperm.getQualifier() != null) { Set qualifiers = Sets.newTreeSet(Bytes.BYTES_COMPARATOR); @@ -2214,12 +2321,37 @@ public class AccessController extends BaseMasterAndRegionObserver } } - requirePermission("checkPermissions", action, regionEnv, familyMap); + AuthResult result = permissionGranted("checkPermissions", user, action, regionEnv, + familyMap); + logResult(result); + if (!result.isAllowed()) { + // Even if passive we need to throw an exception here, we support checking + // effective permissions, so throw unconditionally + throw new AccessDeniedException("Insufficient permissions (table=" + tableName + + (familyMap.size() > 0 ? 
", family: " + result.toFamilyString() : "") + + ", action=" + action.toString() + ")"); + } } } else { + // Check global permissions + for (Action action : permission.getActions()) { - requirePermission("checkPermissions", action); + AuthResult result; + if (authManager.authorize(user, action)) { + result = AuthResult.allow("checkPermissions", "Global action allowed", user, + action, null, null); + } else { + result = AuthResult.deny("checkPermissions", "Global action denied", user, action, + null, null); + } + logResult(result); + if (!result.isAllowed()) { + // Even if passive we need to throw an exception here, we support checking + // effective permissions, so throw unconditionally + throw new AccessDeniedException("Insufficient permissions (action=" + + action.toString() + ")"); + } } } } @@ -2235,19 +2367,19 @@ public class AccessController extends BaseMasterAndRegionObserver return AccessControlProtos.AccessControlService.newReflectiveService(this); } - private HRegion getRegion(RegionCoprocessorEnvironment e) { + private Region getRegion(RegionCoprocessorEnvironment e) { return e.getRegion(); } private TableName getTableName(RegionCoprocessorEnvironment e) { - HRegion region = e.getRegion(); + Region region = e.getRegion(); if (region != null) { return getTableName(region); } return null; } - private TableName getTableName(HRegion region) { + private TableName getTableName(Region region) { HRegionInfo regionInfo = region.getRegionInfo(); if (regionInfo != null) { return regionInfo.getTable(); @@ -2262,6 +2394,10 @@ public class AccessController extends BaseMasterAndRegionObserver } private void isSystemOrSuperUser(Configuration conf) throws IOException { + // No need to check if we're not going to throw + if (!authorizationEnabled) { + return; + } User user = userProvider.getCurrent(); if (user == null) { throw new IOException("Unable to obtain the current user, " + @@ -2351,31 +2487,31 @@ public class AccessController extends BaseMasterAndRegionObserver } @Override - public void preMerge(ObserverContext ctx, HRegion regionA, - HRegion regionB) throws IOException { + public void preMerge(ObserverContext ctx, Region regionA, + Region regionB) throws IOException { requirePermission("mergeRegions", regionA.getTableDesc().getTableName(), null, null, Action.ADMIN); } @Override - public void postMerge(ObserverContext c, HRegion regionA, - HRegion regionB, HRegion mergedRegion) throws IOException { } + public void postMerge(ObserverContext c, Region regionA, + Region regionB, Region mergedRegion) throws IOException { } @Override public void preMergeCommit(ObserverContext ctx, - HRegion regionA, HRegion regionB, List metaEntries) throws IOException { } + Region regionA, Region regionB, List metaEntries) throws IOException { } @Override public void postMergeCommit(ObserverContext ctx, - HRegion regionA, HRegion regionB, HRegion mergedRegion) throws IOException { } + Region regionA, Region regionB, Region mergedRegion) throws IOException { } @Override public void preRollBackMerge(ObserverContext ctx, - HRegion regionA, HRegion regionB) throws IOException { } + Region regionA, Region regionB) throws IOException { } @Override public void postRollBackMerge(ObserverContext ctx, - HRegion regionA, HRegion regionB) throws IOException { } + Region regionA, Region regionB) throws IOException { } @Override public void preRollWALWriterRequest(ObserverContext ctx) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java index df4fb72e189..bf05dc10b89 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthResult.java @@ -27,6 +27,8 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; +import com.google.common.base.Joiner; + /** * Represents the result of an authorization check for logging and error * reporting. @@ -40,6 +42,7 @@ public class AuthResult { private final String request; private String reason; private final User user; + private AuthResult.Params params; // "family" and "qualifier" should only be used if "families" is null. private final byte[] family; @@ -58,6 +61,7 @@ public class AuthResult { this.action = action; this.families = null; this.namespace = null; + this.params = new Params().setTableName(table).setFamily(family).setQualifier(qualifier); } public AuthResult(boolean allowed, String request, String reason, User user, @@ -73,6 +77,7 @@ public class AuthResult { this.action = action; this.families = families; this.namespace = null; + this.params = new Params().setTableName(table).setFamilies(families); } public AuthResult(boolean allowed, String request, String reason, User user, @@ -87,6 +92,7 @@ public class AuthResult { this.family = null; this.qualifier = null; this.families = null; + this.params = new Params().setNamespace(namespace); } public boolean isAllowed() { @@ -121,6 +127,8 @@ public class AuthResult { return request; } + public Params getParams() { return this.params;} + public void setAllowed(boolean allowed) { this.allowed = allowed; } @@ -129,7 +137,8 @@ public class AuthResult { this.reason = reason; } - String toFamilyString() { + private static String toFamiliesString(Map> families, + byte[] family, byte[] qual) { StringBuilder sb = new StringBuilder(); if (families != null) { boolean first = true; @@ -164,8 +173,8 @@ public class AuthResult { } } else if (family != null) { sb.append(Bytes.toString(family)); - if (qualifier != null) { - sb.append(":").append(Bytes.toString(qualifier)); + if (qual != null) { + sb.append(":").append(Bytes.toString(qual)); } } return sb.toString(); @@ -173,16 +182,24 @@ public class AuthResult { public String toContextString() { StringBuilder sb = new StringBuilder(); + String familiesString = toFamiliesString(families, family, qualifier); sb.append("(user=") .append(user != null ? user.getName() : "UNKNOWN") .append(", "); sb.append("scope=") - .append(namespace != null ? namespace : table == null ? "GLOBAL" : table); - if(namespace == null) { - sb.append(", ") - .append("family=") - .append(toFamilyString()) + .append(namespace != null ? namespace : + table == null ? "GLOBAL" : table.getNameWithNamespaceInclAsString()) .append(", "); + if(namespace == null && familiesString.length() > 0) { + sb.append("family=") + .append(familiesString) + .append(", "); + } + String paramsString = params.toString(); + if(paramsString.length() > 0) { + sb.append("params=[") + .append(paramsString) + .append("],"); } sb.append("action=") .append(action != null ? 
action.toString() : "") @@ -225,4 +242,52 @@ public class AuthResult { Map> families) { return new AuthResult(false, request, reason, user, action, table, families); } + + public String toFamilyString() { + return toFamiliesString(families, family, qualifier); + } + + public static class Params { + private String namespace = null; + private TableName tableName = null; + private Map> families = null; + byte[] family = null; + byte[] qualifier = null; + + public Params setNamespace(String namespace) { + this.namespace = namespace; + return this; + } + + public Params setTableName(TableName table) { + this.tableName = table; + return this; + } + + public Params setFamilies(Map> families) { + this.families = families; + return this; + } + + public Params setFamily(byte[] family) { + this.family = family; + return this; + } + + public Params setQualifier(byte[] qualifier) { + this.qualifier = qualifier; + return this; + } + + public String toString() { + String familiesString = toFamiliesString(families, family, qualifier); + String[] params = new String[] { + namespace != null ? "namespace=" + namespace : null, + tableName != null ? "table=" + tableName.getNameWithNamespaceInclAsString() : null, + familiesString.length() > 0 ? "family=" + familiesString : null + }; + return Joiner.on(",").skipNulls().join(params); + } + + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java index 058992fc3e7..6abc3584972 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.security.access; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; @@ -38,7 +39,7 @@ import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver; import org.apache.hadoop.hbase.coprocessor.CoprocessorService; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.hadoop.hbase.ipc.RequestContext; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.ResponseConverter; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; @@ -49,7 +50,8 @@ import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBu import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadResponse; import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesRequest; import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesResponse; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.Region.BulkLoadListener; import org.apache.hadoop.hbase.security.SecureBulkLoadUtil; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; @@ -236,7 +238,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService return; } - HRegion region = env.getRegion(); + Region 
region = env.getRegion(); boolean bypass = false; if (region.getCoprocessorHost() != null) { try { @@ -334,8 +336,8 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService } private User getActiveUser() { - User user = RequestContext.getRequestUser(); - if (!RequestContext.isInRequestContext()) { + User user = RpcServer.getRequestUser(); + if (user == null) { return null; } @@ -353,7 +355,7 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService return this; } - private static class SecureBulkLoadListener implements HRegion.BulkLoadListener { + private static class SecureBulkLoadListener implements BulkLoadListener { // Target filesystem private FileSystem fs; private String stagingDir; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java index 6ca40e670b3..d043735d429 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java @@ -23,6 +23,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -105,7 +106,7 @@ public class TableAuthManager { private Configuration conf; private ZKPermissionWatcher zkperms; - private volatile long mtime; + private final AtomicLong mtime = new AtomicLong(0L); private TableAuthManager(ZooKeeperWatcher watcher, Configuration conf) throws IOException { @@ -212,7 +213,7 @@ public class TableAuthManager { } } globalCache = newCache; - mtime++; + mtime.incrementAndGet(); } catch (IOException e) { // Never happens LOG.error("Error occured while updating the global cache", e); @@ -240,7 +241,7 @@ public class TableAuthManager { } tableCache.put(table, newTablePerms); - mtime++; + mtime.incrementAndGet(); } /** @@ -264,7 +265,7 @@ public class TableAuthManager { } nsCache.put(namespace, newTablePerms); - mtime++; + mtime.incrementAndGet(); } private PermissionCache getTablePermissions(TableName table) { @@ -295,7 +296,7 @@ public class TableAuthManager { } } } else if (LOG.isDebugEnabled()) { - LOG.debug("No permissions found"); + LOG.debug("No permissions found for " + action); } return false; @@ -488,20 +489,26 @@ public class TableAuthManager { * permissions. */ public boolean authorizeGroup(String groupName, Permission.Action action) { - return authorize(globalCache.getGroup(groupName), action); + List perms = globalCache.getGroup(groupName); + if (LOG.isDebugEnabled()) { + LOG.debug("authorizing " + (perms != null && !perms.isEmpty() ? perms.get(0) : "") + + " for " + action); + } + return authorize(perms, action); } /** - * Checks authorization to a given table and column family for a group, based + * Checks authorization to a given table, column family and column for a group, based * on the stored permissions. 
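Moving mtime from a volatile long to AtomicLong closes a lost-update window: "mtime++" on a volatile field is still a non-atomic read-modify-write, so two concurrent cache refreshes can lose an increment. A minimal sketch of the safe counter:

// Illustrative sketch, not part of the patch: why the cache "mtime" counter
// becomes an AtomicLong. volatile gives visibility, but ++ is read-modify-write;
// incrementAndGet() performs the whole step atomically.
import java.util.concurrent.atomic.AtomicLong;

final class MtimeDemo {
  private final AtomicLong mtime = new AtomicLong(0L);

  void onCacheRefresh() {
    mtime.incrementAndGet();   // safe under any number of updater threads
  }

  long getMTime() {
    return mtime.get();        // readers always see the latest committed value
  }
}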
* @param groupName * @param table * @param family + * @param qualifier * @param action * @return true if known and authorized, false otherwise */ public boolean authorizeGroup(String groupName, TableName table, byte[] family, - Permission.Action action) { + byte[] qualifier, Permission.Action action) { // Global authorization supercedes table level if (authorizeGroup(groupName, action)) { return true; @@ -513,7 +520,13 @@ public class TableAuthManager { return true; } // Check table level - return authorize(getTablePermissions(table).getGroup(groupName), table, family, action); + List tblPerms = getTablePermissions(table).getGroup(groupName); + if (LOG.isDebugEnabled()) { + LOG.debug("authorizing " + (tblPerms != null && !tblPerms.isEmpty() ? tblPerms.get(0) : "") + + " for " +groupName + " on " + table + "." + Bytes.toString(family) + "." + + Bytes.toString(qualifier) + " with " + action); + } + return authorize(tblPerms, table, family, qualifier, action); } /** @@ -548,7 +561,7 @@ public class TableAuthManager { String[] groups = user.getGroupNames(); if (groups != null) { for (String group : groups) { - if (authorizeGroup(group, table, family, action)) { + if (authorizeGroup(group, table, family, qualifier, action)) { return true; } } @@ -729,7 +742,7 @@ public class TableAuthManager { } public long getMTime() { - return mtime; + return mtime.get(); } static Map managerMap = diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java index 53de50fa539..2c051ea7ea7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java @@ -211,4 +211,21 @@ public class ZKPermissionWatcher extends ZooKeeperListener { watcher.abort("Failed deleting node " + zkNode, e); } } + + /*** + * Delete the acl notify node of namespace + */ + public void deleteNamespaceACLNode(final String namespace) { + String zkNode = ZKUtil.joinZNode(watcher.baseZNode, ACL_NODE); + zkNode = ZKUtil.joinZNode(zkNode, AccessControlLists.NAMESPACE_PREFIX + namespace); + + try { + ZKUtil.deleteNode(watcher, zkNode); + } catch (KeeperException.NoNodeException e) { + LOG.warn("No acl notify node of namespace '" + namespace + "'"); + } catch (KeeperException e) { + LOG.error("Failed deleting acl node of namespace '" + namespace + "'", e); + watcher.abort("Failed deleting node " + zkNode, e); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java index 568d6fd0972..6548194214e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.CoprocessorService; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.ipc.RpcServer; -import org.apache.hadoop.hbase.ipc.RequestContext; import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.ResponseConverter; @@ -111,7 +110,7 @@ public class TokenProvider implements 
AuthenticationProtos.AuthenticationService "No secret manager configured for token authentication"); } - User currentUser = RequestContext.getRequestUser(); + User currentUser = RpcServer.getRequestUser(); UserGroupInformation ugi = null; if (currentUser != null) { ugi = currentUser.getUGI(); @@ -137,7 +136,7 @@ public class TokenProvider implements AuthenticationProtos.AuthenticationService @Override public void whoAmI(RpcController controller, AuthenticationProtos.WhoAmIRequest request, RpcCallback done) { - User requestUser = RequestContext.getRequestUser(); + User requestUser = RpcServer.getRequestUser(); AuthenticationProtos.WhoAmIResponse.Builder response = AuthenticationProtos.WhoAmIResponse.newBuilder(); if (requestUser != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java index 6b9a358a60c..34ccb4a25eb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java @@ -44,6 +44,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; @@ -55,8 +56,8 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.util.StreamUtils; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.OperationStatus; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.access.AccessControlLists; @@ -76,7 +77,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService private AtomicInteger ordinalCounter = new AtomicInteger(-1); private Configuration conf; - private HRegion labelsRegion; + private Region labelsRegion; private VisibilityLabelsCache labelsCache; private List scanLabelGenerators; private List superUsers; @@ -196,7 +197,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService return new Pair, Map>>(labels, userAuths); } - protected void addSystemLabel(HRegion region, Map labels, + protected void addSystemLabel(Region region, Map labels, Map> userAuths) throws IOException { if (!labels.containsKey(SYSTEM_LABEL)) { Put p = new Put(Bytes.toBytes(SYSTEM_LABEL_ORDINAL)); @@ -307,7 +308,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService private boolean mutateLabelsRegion(List mutations, OperationStatus[] finalOpStatus) throws IOException { OperationStatus[] opStatus = this.labelsRegion.batchMutate(mutations - .toArray(new Mutation[mutations.size()])); + .toArray(new Mutation[mutations.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE); int i = 0; boolean updateZk = false; for (OperationStatus status : opStatus) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java 
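Several of the classes touched here now resolve the caller the same way: ask RpcServer for the request user, and fall back to the process user when the call is not coming in over an RPC. A hedged sketch of that lookup (the helper class name is illustrative):

// Illustrative sketch, not part of the patch.
import java.io.IOException;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.security.User;

final class ActiveUserLookup {
  static User getActiveUser() throws IOException {
    User user = RpcServer.getRequestUser();
    if (user == null) {
      // Not inside an RPC handler: use the user the process is running as.
      user = User.getCurrent();
    }
    return user;
  }
}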
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java index f65494c9da3..6e659ef47c0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java @@ -73,7 +73,7 @@ import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.filter.FilterList; import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.ipc.RequestContext; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.protobuf.ResponseConverter; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult; @@ -90,10 +90,10 @@ import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.Visibil import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.DeleteTracker; import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; import org.apache.hadoop.hbase.regionserver.OperationStatus; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; import org.apache.hadoop.hbase.security.AccessDeniedException; @@ -125,7 +125,7 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements // flags if we are running on a region of the 'labels' table private boolean labelsRegion = false; // Flag denoting whether AcessController is available or not. - private boolean acOn = false; + private boolean accessControllerAvailable = false; private Configuration conf; private volatile boolean initialized = false; private boolean checkAuths = false; @@ -137,6 +137,10 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements private List superGroups; private VisibilityLabelService visibilityLabelService; + /** if we are active, usually true, only not true if "hbase.security.authorization" + has been set to false in site configuration */ + boolean authorizationEnabled; + // Add to this list if there are any reserved tag types private static ArrayList RESERVED_VIS_TAG_TYPES = new ArrayList(); static { @@ -148,6 +152,12 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements @Override public void start(CoprocessorEnvironment env) throws IOException { this.conf = env.getConfiguration(); + + authorizationEnabled = conf.getBoolean(User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY, true); + if (!authorizationEnabled) { + LOG.warn("The VisibilityController has been loaded with authorization checks disabled."); + } + if (HFile.getFormatVersion(conf) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) { throw new RuntimeException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS + " is required to persist visibility labels. 
Consider setting " + HFile.FORMAT_VERSION_KEY @@ -200,6 +210,9 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements @Override public void preModifyTable(ObserverContext ctx, TableName tableName, HTableDescriptor htd) throws IOException { + if (!authorizationEnabled) { + return; + } if (LABELS_TABLE_NAME.equals(tableName)) { throw new ConstraintException("Cannot alter " + LABELS_TABLE_NAME); } @@ -208,6 +221,9 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements @Override public void preAddColumn(ObserverContext ctx, TableName tableName, HColumnDescriptor column) throws IOException { + if (!authorizationEnabled) { + return; + } if (LABELS_TABLE_NAME.equals(tableName)) { throw new ConstraintException("Cannot alter " + LABELS_TABLE_NAME); } @@ -216,6 +232,9 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements @Override public void preModifyColumn(ObserverContext ctx, TableName tableName, HColumnDescriptor descriptor) throws IOException { + if (!authorizationEnabled) { + return; + } if (LABELS_TABLE_NAME.equals(tableName)) { throw new ConstraintException("Cannot alter " + LABELS_TABLE_NAME); } @@ -224,6 +243,9 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements @Override public void preDeleteColumn(ObserverContext ctx, TableName tableName, byte[] c) throws IOException { + if (!authorizationEnabled) { + return; + } if (LABELS_TABLE_NAME.equals(tableName)) { throw new ConstraintException("Cannot alter " + LABELS_TABLE_NAME); } @@ -232,6 +254,9 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements @Override public void preDisableTable(ObserverContext ctx, TableName tableName) throws IOException { + if (!authorizationEnabled) { + return; + } if (LABELS_TABLE_NAME.equals(tableName)) { throw new ConstraintException("Cannot disable " + LABELS_TABLE_NAME); } @@ -244,7 +269,8 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements // Read the entire labels table and populate the zk if (e.getEnvironment().getRegion().getRegionInfo().getTable().equals(LABELS_TABLE_NAME)) { this.labelsRegion = true; - this.acOn = CoprocessorHost.getLoadedCoprocessors().contains(AccessController.class.getName()); + this.accessControllerAvailable = CoprocessorHost.getLoadedCoprocessors() + .contains(AccessController.class.getName()); // Defer the init of VisibilityLabelService on labels region until it is in recovering state. 
if (!e.getEnvironment().getRegion().isRecovering()) { initVisibilityLabelService(e.getEnvironment()); @@ -298,9 +324,12 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { pair = checkForReservedVisibilityTagPresence(cellScanner.current(), pair); if (!pair.getFirst()) { - miniBatchOp.setOperationStatus(i, new OperationStatus(SANITY_CHECK_FAILURE, + // Don't disallow reserved tags if authorization is disabled + if (authorizationEnabled) { + miniBatchOp.setOperationStatus(i, new OperationStatus(SANITY_CHECK_FAILURE, "Mutation contains cell with reserved type tag")); - sanityFailure = true; + sanityFailure = true; + } break; } else { // Indicates that the cell has a the tag which was modified in the src replication cluster @@ -319,7 +348,7 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements List visibilityTags = labelCache.get(labelsExp); if (visibilityTags == null) { // Don't check user auths for labels with Mutations when the user is super user - boolean authCheck = this.checkAuths && !(isSystemOrSuperUser()); + boolean authCheck = authorizationEnabled && checkAuths && !(isSystemOrSuperUser()); try { visibilityTags = this.visibilityLabelService.createVisibilityExpTags(labelsExp, true, authCheck); @@ -366,6 +395,11 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements public void prePrepareTimeStampForDeleteVersion( ObserverContext ctx, Mutation delete, Cell cell, byte[] byteNow, Get get) throws IOException { + // Nothing to do if we are not filtering by visibility + if (!authorizationEnabled) { + return; + } + CellVisibility cellVisibility = null; try { cellVisibility = delete.getCellVisibility(); @@ -513,7 +547,11 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements if (!initialized) { throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized!"); } - HRegion region = e.getEnvironment().getRegion(); + // Nothing to do if authorization is not enabled + if (!authorizationEnabled) { + return s; + } + Region region = e.getEnvironment().getRegion(); Authorizations authorizations = null; try { authorizations = scan.getAuthorizations(); @@ -547,7 +585,11 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements public DeleteTracker postInstantiateDeleteTracker( ObserverContext ctx, DeleteTracker delTracker) throws IOException { - HRegion region = ctx.getEnvironment().getRegion(); + // Nothing to do if we are not filtering by visibility + if (!authorizationEnabled) { + return delTracker; + } + Region region = ctx.getEnvironment().getRegion(); TableName table = region.getRegionInfo().getTable(); if (table.isSystemTable()) { return delTracker; @@ -596,22 +638,25 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements * access control is correctly enforced based on the checks performed in preScannerOpen() */ private void requireScannerOwner(InternalScanner s) throws AccessDeniedException { - if (RequestContext.isInRequestContext()) { - String requestUName = RequestContext.getRequestUserName(); - String owner = scannerOwners.get(s); - if (owner != null && !owner.equals(requestUName)) { - throw new AccessDeniedException("User '" + requestUName + "' is not the scanner owner!"); - } + // This is duplicated code! 
+ String requestUName = RpcServer.getRequestUserName(); + String owner = scannerOwners.get(s); + if (authorizationEnabled && owner != null && !owner.equals(requestUName)) { + throw new AccessDeniedException("User '" + requestUName + "' is not the scanner owner!"); } } @Override - public void preGetOp(ObserverContext e, Get get, List results) - throws IOException { + public void preGetOp(ObserverContext e, Get get, + List results) throws IOException { if (!initialized) { - throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized!"); + throw new VisibilityControllerNotReadyException("VisibilityController not yet initialized"); } - HRegion region = e.getEnvironment().getRegion(); + // Nothing useful to do if authorization is not enabled + if (!authorizationEnabled) { + return; + } + Region region = e.getEnvironment().getRegion(); Authorizations authorizations = null; try { authorizations = get.getAuthorizations(); @@ -658,6 +703,10 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements @Override public Result preAppend(ObserverContext e, Append append) throws IOException { + // If authorization is not enabled, we don't care about reserved tags + if (!authorizationEnabled) { + return null; + } for (CellScanner cellScanner = append.cellScanner(); cellScanner.advance();) { if (!checkForReservedVisibilityTagPresence(cellScanner.current())) { throw new FailedSanityCheckException("Append contains cell with reserved type tag"); @@ -669,6 +718,10 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements @Override public Result preIncrement(ObserverContext e, Increment increment) throws IOException { + // If authorization is not enabled, we don't care about reserved tags + if (!authorizationEnabled) { + return null; + } for (CellScanner cellScanner = increment.cellScanner(); cellScanner.advance();) { if (!checkForReservedVisibilityTagPresence(cellScanner.current())) { throw new FailedSanityCheckException("Increment contains cell with reserved type tag"); @@ -692,7 +745,7 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements } // Prepend new visibility tags to a new list of tags for the cell // Don't check user auths for labels with Mutations when the user is super user - boolean authCheck = this.checkAuths && !(isSystemOrSuperUser()); + boolean authCheck = authorizationEnabled && checkAuths && !(isSystemOrSuperUser()); tags.addAll(this.visibilityLabelService.createVisibilityExpTags(cellVisibility.getExpression(), true, authCheck)); // Save an object allocation where we can @@ -731,7 +784,9 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements } else { List labels = new ArrayList(visLabels.size()); try { - checkCallingUserAuth(); + if (authorizationEnabled) { + checkCallingUserAuth(); + } RegionActionResult successResult = RegionActionResult.newBuilder().build(); for (VisibilityLabel visLabel : visLabels) { byte[] label = visLabel.getLabel().toByteArray(); @@ -791,8 +846,9 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements byte[] user = request.getUser().toByteArray(); List labelAuths = new ArrayList(auths.size()); try { - checkCallingUserAuth(); - + if (authorizationEnabled) { + checkCallingUserAuth(); + } for (ByteString authBS : auths) { labelAuths.add(authBS.toByteArray()); } @@ -825,12 +881,8 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements private void logResult(boolean isAllowed, 
String request, String reason, byte[] user, List labelAuths, String regex) { if (AUDITLOG.isTraceEnabled()) { - RequestContext ctx = RequestContext.get(); - InetAddress remoteAddr = null; - if (ctx != null) { - remoteAddr = ctx.getRemoteAddress(); - } - + // This is more duplicated code! + InetAddress remoteAddr = RpcServer.getRemoteAddress(); List labelAuthsStr = new ArrayList<>(); if (labelAuths != null) { int labelAuthsSize = labelAuths.size(); @@ -867,7 +919,7 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements try { // We do ACL check here as we create scanner directly on region. It will not make calls to // AccessController CP methods. - if (this.acOn && !isSystemOrSuperUser()) { + if (authorizationEnabled && accessControllerAvailable && !isSystemOrSuperUser()) { User requestingUser = VisibilityUtils.getActiveUser(); throw new AccessDeniedException("User '" + (requestingUser != null ? requestingUser.getShortName() : "null") @@ -910,13 +962,15 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements List labelAuths = new ArrayList(auths.size()); try { // When AC is ON, do AC based user auth check - if (this.acOn && !isSystemOrSuperUser()) { + if (authorizationEnabled && accessControllerAvailable && !isSystemOrSuperUser()) { User user = VisibilityUtils.getActiveUser(); throw new AccessDeniedException("User '" + (user != null ? user.getShortName() : "null") + " is not authorized to perform this action."); } - checkCallingUserAuth(); // When AC is not in place the calling user should have SYSTEM_LABEL - // auth to do this action. + if (authorizationEnabled) { + checkCallingUserAuth(); // When AC is not in place the calling user should have + // SYSTEM_LABEL auth to do this action. + } for (ByteString authBS : auths) { labelAuths.add(authBS.toByteArray()); } @@ -960,7 +1014,7 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements try { // We do ACL check here as we create scanner directly on region. It will not make calls to // AccessController CP methods. - if (this.acOn && !isSystemOrSuperUser()) { + if (authorizationEnabled && accessControllerAvailable && !isSystemOrSuperUser()) { User requestingUser = VisibilityUtils.getActiveUser(); throw new AccessDeniedException("User '" + (requestingUser != null ? 
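The audit path now takes the remote address directly from RpcServer instead of RequestContext; below is a small sketch of that logging shape (logger name and message format are illustrative, not the patch's exact output).

// Illustrative sketch, not part of the patch.
import java.net.InetAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.ipc.RpcServer;

final class AuditLogSketch {
  private static final Log AUDITLOG = LogFactory.getLog("SecurityLogger.demo");

  static void logRequest(String user, String reason, boolean allowed) {
    if (AUDITLOG.isTraceEnabled()) {
      InetAddress remoteAddr = RpcServer.getRemoteAddress();   // null outside an RPC
      AUDITLOG.trace("Access " + (allowed ? "allowed" : "denied")
          + " for user " + user + "; reason: " + reason
          + "; remote address: " + (remoteAddr != null ? remoteAddr.toString() : "") + ";");
    }
  }
}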
requestingUser.getShortName() : "null") @@ -984,7 +1038,10 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements } private void checkCallingUserAuth() throws IOException { - if (!this.acOn) { + if (!authorizationEnabled) { // Redundant, but just in case + return; + } + if (!accessControllerAvailable) { User user = VisibilityUtils.getActiveUser(); if (user == null) { throw new IOException("Unable to retrieve calling user"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java index ebff5ffa98a..916a34c2821 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java @@ -44,13 +44,13 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.util.StreamUtils; -import org.apache.hadoop.hbase.ipc.RequestContext; +import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.MultiUserAuthorizations; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.UserAuthorizations; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabel; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsRequest; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.access.AccessControlLists; @@ -308,7 +308,7 @@ public class VisibilityUtils { return false; } - public static Filter createVisibilityLabelFilter(HRegion region, Authorizations authorizations) + public static Filter createVisibilityLabelFilter(Region region, Authorizations authorizations) throws IOException { Map cfVsMaxVersions = new HashMap(); for (HColumnDescriptor hcd : region.getTableDesc().getFamilies()) { @@ -326,8 +326,8 @@ public class VisibilityUtils { * @throws IOException When there is IOE in getting the system user (During non-RPC handling). */ public static User getActiveUser() throws IOException { - User user = RequestContext.getRequestUser(); - if (!RequestContext.isInRequestContext()) { + User user = RpcServer.getRequestUser(); + if (user == null) { user = User.getCurrent(); } if (LOG.isTraceEnabled()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java index 19875aa3a4a..175e8d89259 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java @@ -224,7 +224,7 @@ public class SnapshotManifest { // 2. iterate through all the stores in the region LOG.debug("Creating references for hfiles"); - for (Store store : region.getStores().values()) { + for (Store store : region.getStores()) { // 2.1. 
build the snapshot reference for the store Object familyData = visitor.familyOpen(regionData, store.getFamily().getName()); monitor.rethrowException(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java index 309a1c2751d..3e4d35b3a3b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java @@ -24,10 +24,17 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Random; import java.util.Set; import java.util.TreeSet; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -56,6 +63,8 @@ import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; +import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -119,6 +128,172 @@ public final class Canary implements Tool { } } + /** + * For each column family of the region tries to get one row and outputs the latency, or the + * failure. + */ + static class RegionTask implements Callable { + private Connection connection; + private HRegionInfo region; + private Sink sink; + + RegionTask(Connection connection, HRegionInfo region, Sink sink) { + this.connection = connection; + this.region = region; + this.sink = sink; + } + + @Override + public Void call() { + Table table = null; + HTableDescriptor tableDesc = null; + try { + table = connection.getTable(region.getTable()); + tableDesc = table.getTableDescriptor(); + } catch (IOException e) { + LOG.debug("sniffRegion failed", e); + sink.publishReadFailure(region, e); + if (table != null) { + try { + table.close(); + } catch (IOException ioe) { + } + } + return null; + } + + byte[] startKey = null; + Get get = null; + Scan scan = null; + ResultScanner rs = null; + StopWatch stopWatch = new StopWatch(); + for (HColumnDescriptor column : tableDesc.getColumnFamilies()) { + stopWatch.reset(); + startKey = region.getStartKey(); + // Can't do a get on empty start row so do a Scan of first element if any instead. 
+ if (startKey.length > 0) { + get = new Get(startKey); + get.setCacheBlocks(false); + get.setFilter(new FirstKeyOnlyFilter()); + get.addFamily(column.getName()); + } else { + scan = new Scan(); + scan.setRaw(true); + scan.setCaching(1); + scan.setCacheBlocks(false); + scan.setFilter(new FirstKeyOnlyFilter()); + scan.addFamily(column.getName()); + scan.setMaxResultSize(1L); + } + + try { + if (startKey.length > 0) { + stopWatch.start(); + table.get(get); + stopWatch.stop(); + sink.publishReadTiming(region, column, stopWatch.getTime()); + } else { + stopWatch.start(); + rs = table.getScanner(scan); + stopWatch.stop(); + sink.publishReadTiming(region, column, stopWatch.getTime()); + } + } catch (Exception e) { + sink.publishReadFailure(region, column, e); + } finally { + if (rs != null) { + rs.close(); + } + scan = null; + get = null; + startKey = null; + } + } + try { + table.close(); + } catch (IOException e) { + } + return null; + } + } + + /** + * Get one row from a region on the regionserver and outputs the latency, or the failure. + */ + static class RegionServerTask implements Callable { + private Connection connection; + private String serverName; + private HRegionInfo region; + private ExtendedSink sink; + + RegionServerTask(Connection connection, String serverName, HRegionInfo region, + ExtendedSink sink) { + this.connection = connection; + this.serverName = serverName; + this.region = region; + this.sink = sink; + } + + @Override + public Void call() { + TableName tableName = null; + Table table = null; + Get get = null; + byte[] startKey = null; + Scan scan = null; + StopWatch stopWatch = new StopWatch(); + // monitor one region on every region server + stopWatch.reset(); + try { + tableName = region.getTable(); + table = connection.getTable(tableName); + startKey = region.getStartKey(); + // Can't do a get on empty start row so do a Scan of first element if any instead. + if (startKey.length > 0) { + get = new Get(startKey); + get.setCacheBlocks(false); + get.setFilter(new FirstKeyOnlyFilter()); + stopWatch.start(); + table.get(get); + stopWatch.stop(); + } else { + scan = new Scan(); + scan.setCacheBlocks(false); + scan.setFilter(new FirstKeyOnlyFilter()); + scan.setCaching(1); + scan.setMaxResultSize(1L); + stopWatch.start(); + ResultScanner s = table.getScanner(scan); + s.close(); + stopWatch.stop(); + } + sink.publishReadTiming(tableName.getNameAsString(), serverName, stopWatch.getTime()); + } catch (TableNotFoundException tnfe) { + // This is ignored because it doesn't imply that the regionserver is dead + } catch (TableNotEnabledException tnee) { + // This is considered a success since we got a response. + LOG.debug("The targeted table was disabled. 
Assuming success."); + } catch (DoNotRetryIOException dnrioe) { + sink.publishReadFailure(tableName.getNameAsString(), serverName); + LOG.error(dnrioe); + } catch (IOException e) { + sink.publishReadFailure(tableName.getNameAsString(), serverName); + LOG.error(e); + } finally { + if (table != null) { + try { + table.close(); + } catch (IOException e) {/* DO NOTHING */ + } + } + scan = null; + get = null; + startKey = null; + } + return null; + } + } + private static final int USAGE_EXIT_CODE = 1; private static final int INIT_ERROR_EXIT_CODE = 2; private static final int TIMEOUT_ERROR_EXIT_CODE = 3; @@ -128,6 +303,8 @@ public final class Canary implements Tool { private static final long DEFAULT_TIMEOUT = 600000; // 10 mins + private static final int MAX_THREADS_NUM = 16; // #threads to contact regions + private static final Log LOG = LogFactory.getLog(Canary.class); private Configuration conf = null; @@ -138,12 +315,14 @@ public final class Canary implements Tool { private long timeout = DEFAULT_TIMEOUT; private boolean failOnError = true; private boolean regionServerMode = false; + private ExecutorService executor; // threads to retrieve data from regionservers public Canary() { - this(new RegionServerStdOutSink()); + this(new ScheduledThreadPoolExecutor(1), new RegionServerStdOutSink()); } - public Canary(Sink sink) { + public Canary(ExecutorService executor, Sink sink) { + this.executor = executor; this.sink = sink; } @@ -160,6 +339,7 @@ public final class Canary implements Tool { @Override public int run(String[] args) throws Exception { int index = -1; + ChoreService choreService = null; // Process command line args for (int i = 0; i < args.length; i++) { @@ -233,6 +413,15 @@ public final class Canary implements Tool { } } + // Launches chore for refreshing kerberos credentials if security is enabled. + // Please see http://hbase.apache.org/book.html#_running_canary_in_a_kerberos_enabled_cluster + // for more details. 
+ final ScheduledChore authChore = AuthUtil.getAuthChore(conf); + if (authChore != null) { + choreService = new ChoreService("CANARY_TOOL"); + choreService.scheduleChore(authChore); + } + // Start to prepare the stuffs Monitor monitor = null; Thread monitorThread = null; @@ -287,6 +476,9 @@ public final class Canary implements Tool { } while (interval > 0); } // try-with-resources close + if (choreService != null) { + choreService.shutdown(); + } return(monitor.errorCode); } @@ -325,14 +517,13 @@ public final class Canary implements Tool { System.arraycopy(args, index, monitorTargets, 0, length); } - if(this.regionServerMode) { - monitor = new RegionServerMonitor( - connection, - monitorTargets, - this.useRegExp, - (ExtendedSink)this.sink); + if (this.regionServerMode) { + monitor = + new RegionServerMonitor(connection, monitorTargets, this.useRegExp, + (ExtendedSink) this.sink, this.executor); } else { - monitor = new RegionMonitor(connection, monitorTargets, this.useRegExp, this.sink); + monitor = + new RegionMonitor(connection, monitorTargets, this.useRegExp, this.sink, this.executor); } return monitor; } @@ -349,6 +540,7 @@ public final class Canary implements Tool { protected boolean done = false; protected int errorCode = 0; protected Sink sink; + protected ExecutorService executor; public boolean isDone() { return done; @@ -363,14 +555,15 @@ public final class Canary implements Tool { if (this.admin != null) this.admin.close(); } - protected Monitor(Connection connection, String[] monitorTargets, - boolean useRegExp, Sink sink) { + protected Monitor(Connection connection, String[] monitorTargets, boolean useRegExp, Sink sink, + ExecutorService executor) { if (null == connection) throw new IllegalArgumentException("connection shall not be null"); this.connection = connection; this.targets = monitorTargets; this.useRegExp = useRegExp; this.sink = sink; + this.executor = executor; } public abstract void run(); @@ -394,23 +587,31 @@ public final class Canary implements Tool { // a monitor for region mode private static class RegionMonitor extends Monitor { - public RegionMonitor(Connection connection, String[] monitorTargets, - boolean useRegExp, Sink sink) { - super(connection, monitorTargets, useRegExp, sink); + public RegionMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, + Sink sink, ExecutorService executor) { + super(connection, monitorTargets, useRegExp, sink, executor); } @Override public void run() { - if(this.initAdmin()) { + if (this.initAdmin()) { try { + List> taskFutures = new LinkedList>(); if (this.targets != null && this.targets.length > 0) { String[] tables = generateMonitorTables(this.targets); this.initialized = true; for (String table : tables) { - Canary.sniff(admin, sink, table); + taskFutures.addAll(Canary.sniff(admin, sink, table, executor)); } } else { - sniff(); + taskFutures.addAll(sniff()); + } + for (Future future : taskFutures) { + try { + future.get(); + } catch (ExecutionException e) { + LOG.error("Sniff region failed!", e); + } } } catch (Exception e) { LOG.error("Run regionMonitor failed", e); @@ -423,7 +624,7 @@ public final class Canary implements Tool { private String[] generateMonitorTables(String[] monitorTargets) throws IOException { String[] returnTables = null; - if(this.useRegExp) { + if (this.useRegExp) { Pattern pattern = null; HTableDescriptor[] tds = null; Set tmpTables = new TreeSet(); @@ -437,16 +638,15 @@ public final class Canary implements Tool { } } } - } catch(IOException e) { + } catch (IOException e) { 
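The Kerberos handling above is a small reusable pattern: schedule the relogin chore only when AuthUtil returns one, and shut the chore service down when the tool exits. A sketch under those assumptions:

// Illustrative sketch, not part of the patch.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.AuthUtil;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ScheduledChore;

final class AuthChoreSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    ChoreService choreService = null;
    ScheduledChore authChore = AuthUtil.getAuthChore(conf);   // null when not kerberized
    if (authChore != null) {
      choreService = new ChoreService("CANARY_TOOL");
      choreService.scheduleChore(authChore);
    }
    try {
      // ... long-running work that needs fresh Kerberos credentials ...
    } finally {
      if (choreService != null) {
        choreService.shutdown();
      }
    }
  }
}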
LOG.error("Communicate with admin failed", e); throw e; } - if(tmpTables.size() > 0) { + if (tmpTables.size() > 0) { returnTables = tmpTables.toArray(new String[tmpTables.size()]); } else { - String msg = "No HTable found, tablePattern:" - + Arrays.toString(monitorTargets); + String msg = "No HTable found, tablePattern:" + Arrays.toString(monitorTargets); LOG.error(msg); this.errorCode = INIT_ERROR_EXIT_CODE; throw new TableNotFoundException(msg); @@ -461,12 +661,15 @@ public final class Canary implements Tool { /* * canary entry point to monitor all the tables. */ - private void sniff() throws Exception { + private List> sniff() throws Exception { + List> taskFutures = new LinkedList>(); for (HTableDescriptor table : admin.listTables()) { - Canary.sniff(admin, sink, table); + if (admin.isTableEnabled(table.getTableName())) { + taskFutures.addAll(Canary.sniff(admin, sink, table, executor)); + } } + return taskFutures; } - } /** @@ -474,108 +677,56 @@ public final class Canary implements Tool { * @throws Exception */ public static void sniff(final Admin admin, TableName tableName) throws Exception { - sniff(admin, new StdOutSink(), tableName.getNameAsString()); + List> taskFutures = + Canary.sniff(admin, new StdOutSink(), tableName.getNameAsString(), + new ScheduledThreadPoolExecutor(1)); + for (Future future : taskFutures) { + future.get(); + } } /** * Canary entry point for specified table. * @throws Exception */ - private static void sniff(final Admin admin, final Sink sink, String tableName) - throws Exception { - if (admin.isTableAvailable(TableName.valueOf(tableName))) { - sniff(admin, sink, admin.getTableDescriptor(TableName.valueOf(tableName))); + private static List> sniff(final Admin admin, final Sink sink, String tableName, + ExecutorService executor) throws Exception { + if (admin.isTableEnabled(TableName.valueOf(tableName))) { + return Canary.sniff(admin, sink, admin.getTableDescriptor(TableName.valueOf(tableName)), + executor); } else { - LOG.warn(String.format("Table %s is not available", tableName)); + LOG.warn(String.format("Table %s is not enabled", tableName)); } + return new LinkedList>(); } /* * Loops over regions that owns this table, and output some information abouts the state. */ - private static void sniff(final Admin admin, final Sink sink, HTableDescriptor tableDesc) - throws Exception { + private static List> sniff(final Admin admin, final Sink sink, + HTableDescriptor tableDesc, ExecutorService executor) throws Exception { Table table = null; - try { table = admin.getConnection().getTable(tableDesc.getTableName()); } catch (TableNotFoundException e) { - return; + return new ArrayList>(); } - + List tasks = new ArrayList(); try { for (HRegionInfo region : admin.getTableRegions(tableDesc.getTableName())) { - try { - sniffRegion(admin, sink, region, table); - } catch (Exception e) { - sink.publishReadFailure(region, e); - LOG.debug("sniffRegion failed", e); - } + tasks.add(new RegionTask(admin.getConnection(), region, sink)); } } finally { table.close(); } + return executor.invokeAll(tasks); } - - /* - * For each column family of the region tries to get one row and outputs the latency, or the - * failure. 
- */ - private static void sniffRegion( - final Admin admin, - final Sink sink, - HRegionInfo region, - Table table) throws Exception { - HTableDescriptor tableDesc = table.getTableDescriptor(); - byte[] startKey = null; - Get get = null; - Scan scan = null; - ResultScanner rs = null; - StopWatch stopWatch = new StopWatch(); - for (HColumnDescriptor column : tableDesc.getColumnFamilies()) { - stopWatch.reset(); - startKey = region.getStartKey(); - // Can't do a get on empty start row so do a Scan of first element if any instead. - if (startKey.length > 0) { - get = new Get(startKey); - get.addFamily(column.getName()); - } else { - scan = new Scan(); - scan.setCaching(1); - scan.addFamily(column.getName()); - scan.setMaxResultSize(1L); - } - - try { - if (startKey.length > 0) { - stopWatch.start(); - table.get(get); - stopWatch.stop(); - sink.publishReadTiming(region, column, stopWatch.getTime()); - } else { - stopWatch.start(); - rs = table.getScanner(scan); - stopWatch.stop(); - sink.publishReadTiming(region, column, stopWatch.getTime()); - } - } catch (Exception e) { - sink.publishReadFailure(region, column, e); - } finally { - if (rs != null) { - rs.close(); - } - scan = null; - get = null; - startKey = null; - } - } - } - //a monitor for regionserver mode + // a monitor for regionserver mode private static class RegionServerMonitor extends Monitor { - public RegionServerMonitor(Connection connection, String[] monitorTargets, - boolean useRegExp, ExtendedSink sink) { - super(connection, monitorTargets, useRegExp, sink); + public RegionServerMonitor(Connection connection, String[] monitorTargets, boolean useRegExp, + ExtendedSink sink, ExecutorService executor) { + super(connection, monitorTargets, useRegExp, sink, executor); } private ExtendedSink getSink() { @@ -623,64 +774,27 @@ public final class Canary implements Tool { } private void monitorRegionServers(Map> rsAndRMap) { - String serverName = null; - TableName tableName = null; - HRegionInfo region = null; - Table table = null; - Get get = null; - byte[] startKey = null; - Scan scan = null; - StopWatch stopWatch = new StopWatch(); + List tasks = new ArrayList(); + Random rand =new Random(); // monitor one region on every region server for (Map.Entry> entry : rsAndRMap.entrySet()) { - stopWatch.reset(); - serverName = entry.getKey(); - // always get the first region - region = entry.getValue().get(0); - try { - tableName = region.getTable(); - table = admin.getConnection().getTable(tableName); - startKey = region.getStartKey(); - // Can't do a get on empty start row so do a Scan of first element if any instead. 
- if(startKey.length > 0) { - get = new Get(startKey); - stopWatch.start(); - table.get(get); - stopWatch.stop(); - } else { - scan = new Scan(); - scan.setCaching(1); - scan.setMaxResultSize(1L); - stopWatch.start(); - ResultScanner s = table.getScanner(scan); - s.close(); - stopWatch.stop(); + String serverName = entry.getKey(); + // random select a region + HRegionInfo region = entry.getValue().get(rand.nextInt(entry.getValue().size())); + tasks.add(new RegionServerTask(this.connection, serverName, region, getSink())); + } + try { + for (Future future : this.executor.invokeAll(tasks)) { + try { + future.get(); + } catch (ExecutionException e) { + LOG.error("Sniff regionserver failed!", e); + this.errorCode = ERROR_EXIT_CODE; } - this.getSink().publishReadTiming(tableName.getNameAsString(), - serverName, stopWatch.getTime()); - } catch (TableNotFoundException tnfe) { - // This is ignored because it doesn't imply that the regionserver is dead - } catch (TableNotEnabledException tnee) { - // This is considered a success since we got a response. - LOG.debug("The targeted table was disabled. Assuming success."); - } catch (DoNotRetryIOException dnrioe) { - this.getSink().publishReadFailure(tableName.getNameAsString(), serverName); - LOG.error(dnrioe); - } catch (IOException e) { - this.getSink().publishReadFailure(tableName.getNameAsString(), serverName); - LOG.error(e); - this.errorCode = ERROR_EXIT_CODE; - } finally { - if (table != null) { - try { - table.close(); - } catch (IOException e) {/* DO NOTHING */ - } - } - scan = null; - get = null; - startKey = null; } + } catch (InterruptedException e) { + this.errorCode = ERROR_EXIT_CODE; + LOG.error("Sniff regionserver failed!", e); } } @@ -701,7 +815,7 @@ public final class Canary implements Tool { table = this.admin.getConnection().getTable(tableDesc.getTableName()); regionLocator = this.admin.getConnection().getRegionLocator(tableDesc.getTableName()); - for (HRegionLocation location: regionLocator.getAllRegionLocations()) { + for (HRegionLocation location : regionLocator.getAllRegionLocations()) { ServerName rs = location.getServerName(); String rsName = rs.getHostname(); HRegionInfo r = location.getRegionInfo(); @@ -748,7 +862,7 @@ public final class Canary implements Tool { if (this.useRegExp) { regExpFound = false; pattern = Pattern.compile(rsName); - for (Map.Entry> entry : fullRsAndRMap.entrySet()) { + for (Map.Entry> entry : fullRsAndRMap.entrySet()) { matcher = pattern.matcher(entry.getKey()); if (matcher.matches()) { filteredRsAndRMap.put(entry.getKey(), entry.getValue()); @@ -775,15 +889,15 @@ public final class Canary implements Tool { public static void main(String[] args) throws Exception { final Configuration conf = HBaseConfiguration.create(); - final ChoreService choreService = new ChoreService("CANARY_TOOL"); - final ScheduledChore authChore = AuthUtil.getAuthChore(conf); - if (authChore != null) { - choreService.scheduleChore(authChore); - } + int numThreads = conf.getInt("hbase.canary.threads.num", MAX_THREADS_NUM); + ExecutorService executor = new ScheduledThreadPoolExecutor(numThreads); - int exitCode = ToolRunner.run(conf, new Canary(), args); + Class sinkClass = + conf.getClass("hbase.canary.sink.class", StdOutSink.class, Sink.class); + Sink sink = ReflectionUtils.newInstance(sinkClass); - choreService.shutdown(); + int exitCode = ToolRunner.run(conf, new Canary(executor, sink), args); + executor.shutdown(); System.exit(exitCode); } } diff --git 
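The new main() makes the sink pluggable and the probe parallelism configurable; here is a sketch of reading those two settings (the Sink types below are stand-ins to keep the sketch self-contained, not the canary's real hierarchy).

// Illustrative sketch, not part of the patch.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.ReflectionUtils;

final class CanaryConfigSketch {
  // Stand-ins for the sink hierarchy, only so the sketch compiles on its own.
  public interface Sink { }
  public static class StdOutSink implements Sink { }

  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    int numThreads = conf.getInt("hbase.canary.threads.num", 16);
    Class<? extends Sink> sinkClass =
        conf.getClass("hbase.canary.sink.class", StdOutSink.class, Sink.class);
    Sink sink = ReflectionUtils.newInstance(sinkClass);
    System.out.println("probe threads=" + numThreads
        + ", sink=" + sink.getClass().getName());
  }
}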
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java index fff1374ab71..92ab4d1498c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java @@ -60,7 +60,7 @@ public class WriteSinkCoprocessor extends BaseRegionObserver { @Override public void preOpen(ObserverContext e) throws IOException { - regionName = e.getEnvironment().getRegion().getRegionNameAsString(); + regionName = e.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java index 5ec13f4861d..a9cc1c65ed4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java @@ -31,10 +31,11 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.io.compress.Compression; -import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter; +import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; @@ -87,7 +88,7 @@ public class CompressionTest { return ; // already passed test, dont do it again. } else { // failed. 
- throw new IOException("Compression algorithm '" + algo.getName() + "'" + + throw new DoNotRetryIOException("Compression algorithm '" + algo.getName() + "'" + " previously failed test."); } } @@ -98,7 +99,7 @@ public class CompressionTest { compressionTestResults[algo.ordinal()] = true; // passes } catch (Throwable t) { compressionTestResults[algo.ordinal()] = false; // failure - throw new IOException(t); + throw new DoNotRetryIOException(t); } } @@ -119,7 +120,7 @@ public class CompressionTest { throws Exception { Configuration conf = HBaseConfiguration.create(); HFileContext context = new HFileContextBuilder() - .withCompression(AbstractHFileWriter.compressionByName(codec)).build(); + .withCompression(HFileWriterImpl.compressionByName(codec)).build(); HFile.Writer writer = HFile.getWriterFactoryNoCache(conf) .withPath(fs, path) .withFileContext(context) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java index 21714af358f..af90e32a29d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java @@ -27,11 +27,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; -import org.apache.hadoop.hbase.client.HTableInterface; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.RegionLocator; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.security.UserGroupInformation; @@ -127,14 +129,13 @@ public class ConnectionCache { * Caller doesn't close the admin afterwards. * We need to manage it and close it properly. */ - @SuppressWarnings("deprecation") - public HBaseAdmin getAdmin() throws IOException { + public Admin getAdmin() throws IOException { ConnectionInfo connInfo = getCurrentConnection(); if (connInfo.admin == null) { Lock lock = locker.acquireLock(getEffectiveUser()); try { if (connInfo.admin == null) { - connInfo.admin = new HBaseAdmin(connInfo.connection); + connInfo.admin = connInfo.connection.getAdmin(); } } finally { lock.unlock(); @@ -146,9 +147,16 @@ public class ConnectionCache { /** * Caller closes the table afterwards. */ - public HTableInterface getTable(String tableName) throws IOException { + public Table getTable(String tableName) throws IOException { ConnectionInfo connInfo = getCurrentConnection(); - return connInfo.connection.getTable(tableName); + return connInfo.connection.getTable(TableName.valueOf(tableName)); + } + + /** + * Retrieve a regionLocator for the table. The user should close the RegionLocator. 
+ */ + public RegionLocator getRegionLocator(byte[] tableName) throws IOException { + return getCurrentConnection().connection.getRegionLocator(TableName.valueOf(tableName)); } /** @@ -168,7 +176,7 @@ public class ConnectionCache { ugi = UserGroupInformation.createProxyUser(userName, realUser); } User user = userProvider.create(ugi); - HConnection conn = HConnectionManager.createConnection(conf, user); + Connection conn = ConnectionFactory.createConnection(conf, user); connInfo = new ConnectionInfo(conn, userName); connections.put(userName, connInfo); } @@ -180,14 +188,14 @@ public class ConnectionCache { } class ConnectionInfo { - final HConnection connection; + final Connection connection; final String userName; - volatile HBaseAdmin admin; + volatile Admin admin; private long lastAccessTime; private boolean closed; - ConnectionInfo(HConnection conn, String user) { + ConnectionInfo(Connection conn, String user) { lastAccessTime = EnvironmentEdgeManager.currentTime(); connection = conn; closed = false; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java index 8bdac1521f8..0fffcc674b0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java @@ -180,9 +180,11 @@ public class FSHDFSUtils extends FSUtils { long firstPause = conf.getInt("hbase.lease.recovery.first.pause", 4000); // This should be set to how long it'll take for us to timeout against primary datanode if it // is dead. We set it to 61 seconds, 1 second than the default READ_TIMEOUT in HDFS, the - // default value for DFS_CLIENT_SOCKET_TIMEOUT_KEY. - long subsequentPause = conf.getInt("hbase.lease.recovery.dfs.timeout", 61 * 1000); - + // default value for DFS_CLIENT_SOCKET_TIMEOUT_KEY. If recovery is still failing after this + // timeout, then further recovery will take liner backoff with this base, to avoid endless + // preemptions when this value is not properly configured. + long subsequentPauseBase = conf.getLong("hbase.lease.recovery.dfs.timeout", 61 * 1000); + Method isFileClosedMeth = null; // whether we need to look for isFileClosed method boolean findIsFileClosedMeth = true; @@ -198,11 +200,11 @@ public class FSHDFSUtils extends FSUtils { if (nbAttempt == 0) { Thread.sleep(firstPause); } else { - // Cycle here until subsequentPause elapses. While spinning, check isFileClosed if - // available (should be in hadoop 2.0.5... not in hadoop 1 though. + // Cycle here until (subsequentPause * nbAttempt) elapses. While spinning, check + // isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though. 
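The FSHDFSUtils comment above describes the new linear backoff: after the first short pause, attempt n waits roughly n times the hbase.lease.recovery.dfs.timeout base before preempting lease recovery again. A small, hedged illustration of that schedule using the defaults named in the patch (4 s first pause, 61 s base); the class name is illustrative.

  public class LeaseRecoveryBackoffSketch {
    public static void main(String[] args) {
      long firstPause = 4000L;               // hbase.lease.recovery.first.pause default
      long subsequentPauseBase = 61 * 1000L; // hbase.lease.recovery.dfs.timeout default
      for (int nbAttempt = 0; nbAttempt <= 4; nbAttempt++) {
        long waitMs = (nbAttempt == 0) ? firstPause : subsequentPauseBase * nbAttempt;
        System.out.println("attempt " + nbAttempt + ": wait about " + waitMs + " ms");
      }
    }
  }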
long localStartWaiting = EnvironmentEdgeManager.currentTime(); while ((EnvironmentEdgeManager.currentTime() - localStartWaiting) < - subsequentPause) { + subsequentPauseBase * nbAttempt) { Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000)); if (findIsFileClosedMeth) { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index 0d0912eccd8..e86054be7ec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.security.AccessDeniedException; +import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.FSProtos; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -1545,6 +1546,28 @@ public abstract class FSUtils { */ public static Map getTableStoreFilePathMap(Map map, final FileSystem fs, final Path hbaseRootDir, TableName tableName) + throws IOException { + return getTableStoreFilePathMap(map, fs, hbaseRootDir, tableName, null); + } + + /** + * Runs through the HBase rootdir/tablename and creates a reverse lookup map for + * table StoreFile names to the full Path. + *
      + * Example...
      + * Key = 3944417774205889744
      + * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 + * + * @param map map to add values. If null, this method will create and populate one to return + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. + * @param tableName name of the table to scan. + * @param errors ErrorReporter instance or null + * @return Map keyed by StoreFile name with a value of the full Path. + * @throws IOException When scanning the directory fails. + */ + public static Map getTableStoreFilePathMap(Map map, + final FileSystem fs, final Path hbaseRootDir, TableName tableName, ErrorReporter errors) throws IOException { if (map == null) { map = new HashMap(); @@ -1557,10 +1580,16 @@ public abstract class FSUtils { PathFilter familyFilter = new FamilyDirFilter(fs); FileStatus[] regionDirs = fs.listStatus(tableDir, new RegionDirFilter(fs)); for (FileStatus regionDir : regionDirs) { + if (null != errors) { + errors.progress(); + } Path dd = regionDir.getPath(); // else its a region name, now look in region for families FileStatus[] familyDirs = fs.listStatus(dd, familyFilter); for (FileStatus familyDir : familyDirs) { + if (null != errors) { + errors.progress(); + } Path family = familyDir.getPath(); if (family.getName().equals(HConstants.RECOVERED_EDITS_DIR)) { continue; @@ -1569,6 +1598,9 @@ public abstract class FSUtils { // put in map FileStatus[] familyStatus = fs.listStatus(family); for (FileStatus sfStatus : familyStatus) { + if (null != errors) { + errors.progress(); + } Path sf = sfStatus.getPath(); map.put( sf.getName(), sf); } @@ -1589,7 +1621,6 @@ public abstract class FSUtils { return result; } - /** * Runs through the HBase rootdir and creates a reverse lookup map for * table StoreFile names to the full Path. @@ -1605,6 +1636,26 @@ public abstract class FSUtils { */ public static Map getTableStoreFilePathMap( final FileSystem fs, final Path hbaseRootDir) + throws IOException { + return getTableStoreFilePathMap(fs, hbaseRootDir, null); + } + + /** + * Runs through the HBase rootdir and creates a reverse lookup map for + * table StoreFile names to the full Path. + *
      + * Example...
      + * Key = 3944417774205889744
      + * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 + * + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. + * @param errors ErrorReporter instance or null + * @return Map keyed by StoreFile name with a value of the full Path. + * @throws IOException When scanning the directory fails. + */ + public static Map getTableStoreFilePathMap( + final FileSystem fs, final Path hbaseRootDir, ErrorReporter errors) throws IOException { Map map = new HashMap(); @@ -1614,7 +1665,7 @@ public abstract class FSUtils { // only include the directory paths to tables for (Path tableDir : FSUtils.getTableDirs(fs, hbaseRootDir)) { getTableStoreFilePathMap(map, fs, hbaseRootDir, - FSUtils.getTableName(tableDir)); + FSUtils.getTableName(tableDir), errors); } return map; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index a8b60cd054d..67e3411a3df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.util; import java.io.Closeable; import java.io.FileNotFoundException; import java.io.IOException; +import java.io.InterruptedIOException; import java.io.PrintWriter; import java.io.StringWriter; import java.net.InetAddress; @@ -55,10 +56,13 @@ import java.util.concurrent.atomic.AtomicInteger; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.google.common.collect.Multimap; +import com.google.common.collect.Ordering; import com.google.common.collect.TreeMultimap; import com.google.protobuf.ServiceException; + import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; @@ -98,9 +102,6 @@ import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.MetaScanner; -import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; -import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; @@ -197,7 +198,8 @@ public class HBaseFsck extends Configured implements Closeable { private static final int DEFAULT_MAX_MERGE = 5; private static final String TO_BE_LOADED = "to_be_loaded"; private static final String HBCK_LOCK_FILE = "hbase-hbck.lock"; - + private static final int DEFAULT_MAX_LOCK_FILE_ATTEMPTS = 5; + private static final int DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL = 200; /********************** * Internal resources @@ -290,6 +292,8 @@ public class HBaseFsck extends Configured implements Closeable { new HashMap>(); private Map tableStates = new HashMap(); + private final RetryCounterFactory lockFileRetryCounterFactory; + /** * Constructor @@ -311,6 +315,10 @@ public class HBaseFsck extends Configured implements Closeable { int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS); executor = new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck")); + lockFileRetryCounterFactory = new RetryCounterFactory( 
+ getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS), + getConf().getInt("hbase.hbck.lockfile.attempt.sleep.interval", + DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL)); } /** @@ -328,9 +336,17 @@ public class HBaseFsck extends Configured implements Closeable { super(conf); errors = getErrorReporter(getConf()); this.executor = exec; + lockFileRetryCounterFactory = new RetryCounterFactory( + getConf().getInt("hbase.hbck.lockfile.attempts", DEFAULT_MAX_LOCK_FILE_ATTEMPTS), + getConf().getInt("hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL)); } private class FileLockCallable implements Callable { + RetryCounter retryCounter; + + public FileLockCallable(RetryCounter retryCounter) { + this.retryCounter = retryCounter; + } @Override public FSDataOutputStream call() throws IOException { try { @@ -340,7 +356,7 @@ public class HBaseFsck extends Configured implements Closeable { Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY); fs.mkdirs(tmpDir); HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE); - final FSDataOutputStream out = FSUtils.create(fs, HBCK_LOCK_PATH, defaultPerms, false); + final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms); out.writeBytes(InetAddress.getLocalHost().toString()); out.flush(); return out; @@ -352,6 +368,34 @@ public class HBaseFsck extends Configured implements Closeable { } } } + + private FSDataOutputStream createFileWithRetries(final FileSystem fs, + final Path hbckLockFilePath, final FsPermission defaultPerms) + throws IOException { + + IOException exception = null; + do { + try { + return FSUtils.create(fs, hbckLockFilePath, defaultPerms, false); + } catch (IOException ioe) { + LOG.info("Failed to create lock file " + hbckLockFilePath.getName() + + ", try=" + (retryCounter.getAttemptTimes() + 1) + " of " + + retryCounter.getMaxAttempts()); + LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(), + ioe); + try { + exception = ioe; + retryCounter.sleepUntilNextRetry(); + } catch (InterruptedException ie) { + throw (InterruptedIOException) new InterruptedIOException( + "Can't create lock file " + hbckLockFilePath.getName()) + .initCause(ie); + } + } + } while (retryCounter.shouldRetry()); + + throw exception; + } } /** @@ -361,7 +405,8 @@ public class HBaseFsck extends Configured implements Closeable { * @throws IOException */ private FSDataOutputStream checkAndMarkRunningHbck() throws IOException { - FileLockCallable callable = new FileLockCallable(); + RetryCounter retryCounter = lockFileRetryCounterFactory.create(); + FileLockCallable callable = new FileLockCallable(retryCounter); ExecutorService executor = Executors.newFixedThreadPool(1); FutureTask futureTask = new FutureTask(callable); executor.execute(futureTask); @@ -385,14 +430,30 @@ public class HBaseFsck extends Configured implements Closeable { } private void unlockHbck() { - if(hbckLockCleanup.compareAndSet(true, false)){ - IOUtils.closeStream(hbckOutFd); - try{ - FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), HBCK_LOCK_PATH, true); - } catch(IOException ioe) { - LOG.warn("Failed to delete " + HBCK_LOCK_PATH); - LOG.debug(ioe); - } + if (hbckLockCleanup.compareAndSet(true, false)) { + RetryCounter retryCounter = lockFileRetryCounterFactory.create(); + do { + try { + IOUtils.closeStream(hbckOutFd); + FSUtils.delete(FSUtils.getCurrentFileSystem(getConf()), + HBCK_LOCK_PATH, true); + return; + } catch (IOException ioe) { + LOG.info("Failed to delete " 
+ HBCK_LOCK_PATH + ", try=" + + (retryCounter.getAttemptTimes() + 1) + " of " + + retryCounter.getMaxAttempts()); + LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe); + try { + retryCounter.sleepUntilNextRetry(); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + LOG.warn("Interrupted while deleting lock file" + + HBCK_LOCK_PATH); + return; + } + } + } while (retryCounter.shouldRetry()); + } } @@ -580,13 +641,17 @@ public class HBaseFsck extends Configured implements Closeable { // load regiondirs and regioninfos from HDFS if (shouldCheckHdfs()) { + LOG.info("Loading region directories from HDFS"); loadHdfsRegionDirs(); + LOG.info("Loading region information from HDFS"); loadHdfsRegionInfos(); } // fix the orphan tables fixOrphanTables(); + LOG.info("Checking and fixing region consistency"); + // Check and fix consistency checkAndFixConsistency(); @@ -640,6 +705,11 @@ public class HBaseFsck extends Configured implements Closeable { @Override public void close() throws IOException { + try { + unlockHbck(); + } catch (Exception io) { + LOG.warn(io); + } IOUtils.cleanup(null, admin, meta, connection); } @@ -662,7 +732,7 @@ public class HBaseFsck extends Configured implements Closeable { public void checkRegionBoundaries() { try { ByteArrayComparator comparator = new ByteArrayComparator(); - List regions = MetaScanner.listAllRegions(getConf(), connection, false); + List regions = MetaTableAccessor.getAllRegions(connection, true); final RegionBoundariesInformation currentRegionBoundariesInformation = new RegionBoundariesInformation(); Path hbaseRoot = FSUtils.getRootDir(getConf()); @@ -904,7 +974,10 @@ public class HBaseFsck extends Configured implements Closeable { Configuration conf = getConf(); Path hbaseRoot = FSUtils.getRootDir(conf); FileSystem fs = hbaseRoot.getFileSystem(conf); - Map allFiles = FSUtils.getTableStoreFilePathMap(fs, hbaseRoot); + LOG.info("Computing mapping of all store files"); + Map allFiles = FSUtils.getTableStoreFilePathMap(fs, hbaseRoot, errors); + errors.print(""); + LOG.info("Validating mapping using HDFS state"); for (Path path: allFiles.values()) { boolean isReference = false; try { @@ -1102,6 +1175,7 @@ public class HBaseFsck extends Configured implements Closeable { } loadTableInfosForTablesWithNoRegion(); + errors.print(""); return tablesInfo; } @@ -1292,6 +1366,7 @@ public class HBaseFsck extends Configured implements Closeable { */ private void suggestFixes( SortedMap tablesInfo) throws IOException { + logParallelMerge(); for (TableInfo tInfo : tablesInfo.values()) { TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors); tInfo.checkRegionChain(handler); @@ -1365,9 +1440,23 @@ public class HBaseFsck extends Configured implements Closeable { return true; } + /** + * Log an appropriate message about whether or not overlapping merges are computed in parallel. + */ + private void logParallelMerge() { + if (getConf().getBoolean("hbasefsck.overlap.merge.parallel", true)) { + LOG.info("Handling overlap merges in parallel. set hbasefsck.overlap.merge.parallel to" + + " false to run serially."); + } else { + LOG.info("Handling overlap merges serially. 
set hbasefsck.overlap.merge.parallel to" + + " true to run in parallel."); + } + } + private SortedMap checkHdfsIntegrity(boolean fixHoles, boolean fixOverlaps) throws IOException { LOG.info("Checking HBase region split map from HDFS data..."); + logParallelMerge(); for (TableInfo tInfo : tablesInfo.values()) { TableIntegrityErrorHandler handler; if (fixHoles || fixOverlaps) { @@ -1596,6 +1685,7 @@ public class HBaseFsck extends Configured implements Closeable { LOG.warn("Could not load region dir " , e.getCause()); } } + errors.print(""); } /** @@ -1703,20 +1793,28 @@ public class HBaseFsck extends Configured implements Closeable { throws IOException, KeeperException, InterruptedException { // Divide the checks in two phases. One for default/primary replicas and another // for the non-primary ones. Keeps code cleaner this way. + + List workItems = + new ArrayList(regionInfoMap.size()); for (java.util.Map.Entry e: regionInfoMap.entrySet()) { if (e.getValue().getReplicaId() == HRegionInfo.DEFAULT_REPLICA_ID) { - checkRegionConsistency(e.getKey(), e.getValue()); + workItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue())); } } + checkRegionConsistencyConcurrently(workItems); + boolean prevHdfsCheck = shouldCheckHdfs(); setCheckHdfs(false); //replicas don't have any hdfs data // Run a pass over the replicas and fix any assignment issues that exist on the currently // deployed/undeployed replicas. + List replicaWorkItems = + new ArrayList(regionInfoMap.size()); for (java.util.Map.Entry e: regionInfoMap.entrySet()) { if (e.getValue().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { - checkRegionConsistency(e.getKey(), e.getValue()); + replicaWorkItems.add(new CheckRegionConsistencyWorkItem(e.getKey(), e.getValue())); } } + checkRegionConsistencyConcurrently(replicaWorkItems); setCheckHdfs(prevHdfsCheck); if (shouldCheckHdfs()) { @@ -1724,6 +1822,51 @@ public class HBaseFsck extends Configured implements Closeable { } } + /** + * Check consistency of all regions using mulitple threads concurrently. 
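The logParallelMerge helper above only logs which mode is active; the behaviour itself is driven by the hbasefsck.overlap.merge.parallel boolean, which defaults to true. A hedged sketch of flipping that switch when driving hbck programmatically; the class name is illustrative.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class OverlapMergeModeSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // false: overlap groups are merged serially; true (the default): merged in parallel.
      conf.setBoolean("hbasefsck.overlap.merge.parallel", false);
      System.out.println("parallel overlap merge: "
          + conf.getBoolean("hbasefsck.overlap.merge.parallel", true));
    }
  }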
+ */ + private void checkRegionConsistencyConcurrently( + final List workItems) + throws IOException, KeeperException, InterruptedException { + if (workItems.isEmpty()) { + return; // nothing to check + } + + List> workFutures = executor.invokeAll(workItems); + for(Future f: workFutures) { + try { + f.get(); + } catch(ExecutionException e1) { + LOG.warn("Could not check region consistency " , e1.getCause()); + if (e1.getCause() instanceof IOException) { + throw (IOException)e1.getCause(); + } else if (e1.getCause() instanceof KeeperException) { + throw (KeeperException)e1.getCause(); + } else if (e1.getCause() instanceof InterruptedException) { + throw (InterruptedException)e1.getCause(); + } else { + throw new IOException(e1.getCause()); + } + } + } + } + + class CheckRegionConsistencyWorkItem implements Callable { + private final String key; + private final HbckInfo hbi; + + CheckRegionConsistencyWorkItem(String key, HbckInfo hbi) { + this.key = key; + this.hbi = hbi; + } + + @Override + public synchronized Void call() throws Exception { + checkRegionConsistency(key, hbi); + return null; + } + } + /** * Check and fix table states, assumes full info available: * - tableInfos @@ -2064,16 +2207,8 @@ public class HBaseFsck extends Configured implements Closeable { HRegionInfo hri = hbi.getHdfsHRI(); TableInfo tableInfo = tablesInfo.get(hri.getTable()); - if (tableInfo.regionsFromMeta.isEmpty()) { - for (HbckInfo h : regionInfoMap.values()) { - if (hri.getTable().equals(h.getTableName())) { - if (h.metaEntry != null) tableInfo.regionsFromMeta - .add((HRegionInfo) h.metaEntry); - } - } - Collections.sort(tableInfo.regionsFromMeta); - } - for (HRegionInfo region : tableInfo.regionsFromMeta) { + + for (HRegionInfo region : tableInfo.getRegionsFromMeta()) { if (Bytes.compareTo(region.getStartKey(), hri.getStartKey()) <= 0 && (region.getEndKey().length == 0 || Bytes.compareTo(region.getEndKey(), hri.getEndKey()) >= 0) @@ -2284,6 +2419,7 @@ public class HBaseFsck extends Configured implements Closeable { loadTableInfosForTablesWithNoRegion(); + logParallelMerge(); for (TableInfo tInfo : tablesInfo.values()) { TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors); if (!tInfo.checkRegionChain(handler)) { @@ -2429,7 +2565,7 @@ public class HBaseFsck extends Configured implements Closeable { TreeMultimap.create(RegionSplitCalculator.BYTES_COMPARATOR, cmp); // list of regions derived from meta entries. 
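checkRegionConsistencyConcurrently above fans the per-region checks out over the shared executor with invokeAll and then unwraps each ExecutionException so callers still see the worker's original exception type. A generic, hedged sketch of that pattern; the class and method names are illustrative.

  import java.io.IOException;
  import java.util.List;
  import java.util.concurrent.Callable;
  import java.util.concurrent.ExecutionException;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Future;

  public class ParallelCheckSketch {
    static void runAll(ExecutorService executor, List<Callable<Void>> workItems)
        throws IOException, InterruptedException {
      // invokeAll blocks until every work item has either completed or failed.
      List<Future<Void>> futures = executor.invokeAll(workItems);
      for (Future<Void> f : futures) {
        try {
          f.get();
        } catch (ExecutionException e) {
          Throwable cause = e.getCause();
          if (cause instanceof IOException) {
            throw (IOException) cause;   // surface the worker's original failure
          }
          throw new IOException(cause);
        }
      }
    }
  }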
- final List regionsFromMeta = new ArrayList(); + private ImmutableList regionsFromMeta = null; TableInfo(TableName name) { this.tableName = name; @@ -2486,6 +2622,23 @@ public class HBaseFsck extends Configured implements Closeable { return sc.getStarts().size() + backwards.size(); } + public synchronized ImmutableList getRegionsFromMeta() { + // lazy loaded, synchronized to ensure a single load + if (regionsFromMeta == null) { + List regions = new ArrayList(); + for (HbckInfo h : HBaseFsck.this.regionInfoMap.values()) { + if (tableName.equals(h.getTableName())) { + if (h.metaEntry != null) { + regions.add((HRegionInfo) h.metaEntry); + } + } + } + regionsFromMeta = Ordering.natural().immutableSortedCopy(regions); + } + + return regionsFromMeta; + } + private class IntegrityFixSuggester extends TableIntegrityErrorHandlerImpl { ErrorReporter errors; @@ -2883,15 +3036,11 @@ public class HBaseFsck extends Configured implements Closeable { // TODO fold this into the TableIntegrityHandler if (getConf().getBoolean("hbasefsck.overlap.merge.parallel", true)) { - LOG.info("Handling overlap merges in parallel. set hbasefsck.overlap.merge.parallel to" + - " false to run serially."); boolean ok = handleOverlapsParallel(handler, prevKey); if (!ok) { return false; } } else { - LOG.info("Handling overlap merges serially. set hbasefsck.overlap.merge.parallel to" + - " true to run in parallel."); for (Collection overlap : overlapGroups.asMap().values()) { handler.handleOverlapGroup(overlap); } @@ -3164,7 +3313,7 @@ public class HBaseFsck extends Configured implements Closeable { * @throws IOException if an error is encountered */ boolean loadMetaEntries() throws IOException { - MetaScannerVisitor visitor = new MetaScannerVisitorBase() { + MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { int countRecord = 1; // comparator to sort KeyValues with latest modtime @@ -3176,7 +3325,7 @@ public class HBaseFsck extends Configured implements Closeable { }; @Override - public boolean processRow(Result result) throws IOException { + public boolean visit(Result result) throws IOException { try { // record the latest modification of this META record @@ -3248,7 +3397,7 @@ public class HBaseFsck extends Configured implements Closeable { }; if (!checkMetaOnly) { // Scan hbase:meta to pick up user regions - MetaScanner.metaScan(connection, visitor); + MetaTableAccessor.fullScanRegions(connection, visitor); } errors.print(""); @@ -3617,6 +3766,8 @@ public class HBaseFsck extends Configured implements Closeable { static class PrintingErrorReporter implements ErrorReporter { public int errorCount = 0; private int showProgress; + // How frequently calls to progress() will create output + private static final int progressThreshold = 100; Set errorTables = new HashSet(); @@ -3731,7 +3882,7 @@ public class HBaseFsck extends Configured implements Closeable { @Override public synchronized void progress() { - if (showProgress++ == 10) { + if (showProgress++ == progressThreshold) { if (!summary) { System.out.print("."); } @@ -3828,6 +3979,7 @@ public class HBaseFsck extends Configured implements Closeable { // level 2: /
      /* FileStatus[] regionDirs = fs.listStatus(tableDir.getPath()); for (FileStatus regionDir : regionDirs) { + errors.progress(); String encodedName = regionDir.getPath().getName(); // ignore directories that aren't hexadecimal if (!encodedName.toLowerCase().matches("[0-9a-f]+")) { @@ -3855,6 +4007,7 @@ public class HBaseFsck extends Configured implements Closeable { FileStatus[] subDirs = fs.listStatus(regionDir.getPath()); Path ePath = WALSplitter.getRegionDirRecoveredEditsDir(regionDir.getPath()); for (FileStatus subDir : subDirs) { + errors.progress(); String sdName = subDir.getPath().getName(); if (!sdName.startsWith(".") && !sdName.equals(ePath.getName())) { he.hdfsOnlyEdits = false; @@ -3895,6 +4048,7 @@ public class HBaseFsck extends Configured implements Closeable { // only load entries that haven't been loaded yet. if (hbi.getHdfsHRI() == null) { try { + errors.progress(); hbck.loadHdfsRegioninfo(hbi); } catch (IOException ioe) { String msg = "Orphan region in HDFS: Unable to load .regioninfo from table " @@ -3921,7 +4075,7 @@ public class HBaseFsck extends Configured implements Closeable { * Display the full report from fsck. This displays all live and dead region * servers, and all known regions. */ - public void setDisplayFullReport() { + public static void setDisplayFullReport() { details = true; } @@ -3929,7 +4083,7 @@ public class HBaseFsck extends Configured implements Closeable { * Set summary mode. * Print only summary of the tables and status (OK or INCONSISTENT) */ - void setSummary() { + static void setSummary() { summary = true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java index 78c7a065c56..fa1aa00e374 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java @@ -41,9 +41,7 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HConnectable; import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Table; @@ -106,14 +104,16 @@ class HMerge { final TableName tableName, final boolean testMasterRunning) throws IOException { boolean masterIsRunning = false; + HConnection hConnection = null; if (testMasterRunning) { - masterIsRunning = HConnectionManager - .execute(new HConnectable(conf) { - @Override - public Boolean connect(HConnection connection) throws IOException { - return connection.isMasterRunning(); - } - }); + try { + hConnection = (HConnection) ConnectionFactory.createConnection(conf); + masterIsRunning = hConnection.isMasterRunning(); + } finally { + if (hConnection != null) { + hConnection.close(); + } + } } if (tableName.equals(TableName.META_TABLE_NAME)) { if (masterIsRunning) { @@ -206,16 +206,17 @@ class HMerge { if ((currentSize + nextSize) <= (maxFilesize / 2)) { // We merge two adjacent regions if their total size is less than // one half of the desired maximum size - LOG.info("Merging regions " + currentRegion.getRegionNameAsString() + - " and " + nextRegion.getRegionNameAsString()); + LOG.info("Merging regions " + currentRegion.getRegionInfo().getRegionNameAsString() + + " 
and " + nextRegion.getRegionInfo().getRegionNameAsString()); HRegion mergedRegion = HRegion.mergeAdjacent(currentRegion, nextRegion); - updateMeta(currentRegion.getRegionName(), nextRegion.getRegionName(), - mergedRegion); + updateMeta(currentRegion.getRegionInfo().getRegionName(), + nextRegion.getRegionInfo().getRegionName(), mergedRegion); break; } - LOG.info("not merging regions " + Bytes.toStringBinary(currentRegion.getRegionName()) - + " and " + Bytes.toStringBinary(nextRegion.getRegionName())); + LOG.info("not merging regions " + + Bytes.toStringBinary(currentRegion.getRegionInfo().getRegionName()) + + " and " + Bytes.toStringBinary(nextRegion.getRegionInfo().getRegionName())); currentRegion.close(); currentRegion = nextRegion; currentSize = nextSize; @@ -342,7 +343,7 @@ class HMerge { if(LOG.isDebugEnabled()) { LOG.debug("updated columns in row: " - + Bytes.toStringBinary(newRegion.getRegionName())); + + Bytes.toStringBinary(newRegion.getRegionInfo().getRegionName())); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java index b9d0983983b..fedf951c26f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java @@ -25,6 +25,8 @@ import java.util.concurrent.ConcurrentMap; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import com.google.common.annotations.VisibleForTesting; + /** * Allows multiple concurrent clients to lock on a numeric id with a minimal * memory overhead. The intended usage is as follows: @@ -119,4 +121,18 @@ public class IdLock { assert map.size() == 0; } + @VisibleForTesting + public void waitForWaiters(long id, int numWaiters) throws InterruptedException { + for (Entry entry;;) { + entry = map.get(id); + if (entry != null) { + synchronized (entry) { + if (entry.numWaiters >= numWaiters) { + return; + } + } + } + Thread.sleep(100); + } + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java index 6002f29c1d0..4f9658572e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java @@ -173,11 +173,11 @@ public class Merge extends Configured implements Tool { throws IOException { if (info1 == null) { throw new IOException("Could not find " + Bytes.toStringBinary(region1) + " in " + - Bytes.toStringBinary(meta.getRegionName())); + Bytes.toStringBinary(meta.getRegionInfo().getRegionName())); } if (info2 == null) { throw new IOException("Could not find " + Bytes.toStringBinary(region2) + " in " + - Bytes.toStringBinary(meta.getRegionName())); + Bytes.toStringBinary(meta.getRegionInfo().getRegionName())); } HRegion merged = null; HRegion r1 = HRegion.openHRegion(info1, htd, utils.getLog(info1), getConf()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java index 95d8a174c37..347cad592df 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java @@ -67,6 +67,30 @@ public abstract class ModifyRegionUtils { void editRegion(final HRegionInfo region) throws IOException; } + public static HRegionInfo[] createHRegionInfos(HTableDescriptor 
hTableDescriptor, + byte[][] splitKeys) { + long regionId = System.currentTimeMillis(); + HRegionInfo[] hRegionInfos = null; + if (splitKeys == null || splitKeys.length == 0) { + hRegionInfos = new HRegionInfo[]{ + new HRegionInfo(hTableDescriptor.getTableName(), null, null, false, regionId) + }; + } else { + int numRegions = splitKeys.length + 1; + hRegionInfos = new HRegionInfo[numRegions]; + byte[] startKey = null; + byte[] endKey = null; + for (int i = 0; i < numRegions; i++) { + endKey = (i == splitKeys.length) ? null : splitKeys[i]; + hRegionInfos[i] = + new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey, + false, regionId); + startKey = endKey; + } + } + return hRegionInfos; + } + /** * Create new set of regions on the specified file-system. * NOTE: that you should add the regions to hbase:meta after this operation. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java index 81678aa36f0..a55c8765658 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java @@ -35,8 +35,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.coprocessor.Batch; @@ -64,7 +64,7 @@ public class MultiHConnection { synchronized (this.hConnectionsLock) { hConnections = new HConnection[noOfConnections]; for (int i = 0; i < noOfConnections; i++) { - HConnection conn = HConnectionManager.createConnection(conf); + HConnection conn = (HConnection) ConnectionFactory.createConnection(conf); hConnections[i] = conn; } } @@ -130,11 +130,11 @@ public class MultiHConnection { } - // Copied from HConnectionImplementation.getBatchPool() + // Copied from ConnectionImplementation.getBatchPool() // We should get rid of this when HConnection.processBatchCallback is un-deprecated and provides // an API to manage a batch pool private void createBatchPool(Configuration conf) { - // Use the same config for keep alive as in HConnectionImplementation.getBatchPool(); + // Use the same config for keep alive as in ConnectionImplementation.getBatchPool(); int maxThreads = conf.getInt("hbase.multihconnection.threads.max", 256); int coreThreads = conf.getInt("hbase.multihconnection.threads.core", 256); if (maxThreads == 0) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java index 4f7c0a5d12d..9cd24f63da6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSizeCalculator.java @@ -88,6 +88,11 @@ public class RegionSizeCalculator { return; } + if (regionLocator.getName().isSystemTable()) { + LOG.info("Region size calculation disabled for system tables."); + return; + } + LOG.info("Calculating region sizes for table \"" + regionLocator.getName() + "\"."); //get regions for table diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java index cf8721967f7..67f8e84f24c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java @@ -21,8 +21,8 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; @@ -55,6 +55,25 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { private static final boolean DEFAULT_REGION_REPLICA_REPLICATION = false; private static final String REGION_REPLICA_REPLICATION_PEER = "region_replica_replication"; + /** + * Enables or disables refreshing store files of secondary region replicas when the memory is + * above the global memstore lower limit. Refreshing the store files means that we will do a file + * list of the primary regions store files, and pick up new files. Also depending on the store + * files, we can drop some memstore contents which will free up memory. + */ + public static final String REGION_REPLICA_STORE_FILE_REFRESH + = "hbase.region.replica.storefile.refresh"; + private static final boolean DEFAULT_REGION_REPLICA_STORE_FILE_REFRESH = true; + + /** + * The multiplier to use when we want to refresh a secondary region instead of flushing a primary + * region. Default value assumes that for doing the file refresh, the biggest secondary should be + * 4 times bigger than the biggest primary. + */ + public static final String REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER + = "hbase.region.replica.storefile.refresh.memstore.multiplier"; + private static final double DEFAULT_REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER = 4; + /** * Returns the regionInfo object to use for interacting with the file system. * @return An HRegionInfo object to interact with the filesystem @@ -96,23 +115,24 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { * @throws IOException */ public static StoreFileInfo getStoreFileInfo(Configuration conf, FileSystem fs, - HRegionInfo regionInfo, HRegionInfo regionInfoForFs, String familyName, FileStatus status) + HRegionInfo regionInfo, HRegionInfo regionInfoForFs, String familyName, Path path) throws IOException { // if this is a primary region, just return the StoreFileInfo constructed from path if (regionInfo.equals(regionInfoForFs)) { - return new StoreFileInfo(conf, fs, status); - } - - if (StoreFileInfo.isReference(status.getPath())) { - Reference reference = Reference.read(fs, status.getPath()); - return new StoreFileInfo(conf, fs, status, reference); + return new StoreFileInfo(conf, fs, path); } // else create a store file link. The link file does not exists on filesystem though. 
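The two keys introduced above control whether secondary region replicas may refresh their store file lists under memstore pressure, and how much larger the biggest secondary may be than the biggest primary before a refresh is preferred over flushing the primary. A hedged configuration sketch using the defaults defined in ServerRegionReplicaUtil above; the class name is illustrative.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class ReplicaRefreshConfigSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Both values shown here are the defaults from the patch.
      conf.setBoolean("hbase.region.replica.storefile.refresh", true);
      conf.setDouble("hbase.region.replica.storefile.refresh.memstore.multiplier", 4.0);
    }
  }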
HFileLink link = HFileLink.build(conf, regionInfoForFs.getTable(), - regionInfoForFs.getEncodedName(), familyName, status.getPath().getName()); - return new StoreFileInfo(conf, fs, status, link); + regionInfoForFs.getEncodedName(), familyName, path.getName()); + + if (StoreFileInfo.isReference(path)) { + Reference reference = Reference.read(fs, path); + return new StoreFileInfo(conf, fs, link.getFileStatus(fs), reference); + } + + return new StoreFileInfo(conf, fs, link.getFileStatus(fs), link); } /** @@ -121,7 +141,7 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { * @throws IOException */ public static void setupRegionReplicaReplication(Configuration conf) throws IOException { - if (!conf.getBoolean(REGION_REPLICA_REPLICATION_CONF_KEY, DEFAULT_REGION_REPLICA_REPLICATION)) { + if (!isRegionReplicaReplicationEnabled(conf)) { return; } ReplicationAdmin repAdmin = new ReplicationAdmin(conf); @@ -139,6 +159,26 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { } } + public static boolean isRegionReplicaReplicationEnabled(Configuration conf) { + return conf.getBoolean(REGION_REPLICA_REPLICATION_CONF_KEY, + DEFAULT_REGION_REPLICA_REPLICATION); + } + + public static boolean isRegionReplicaWaitForPrimaryFlushEnabled(Configuration conf) { + return conf.getBoolean(REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, + DEFAULT_REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH); + } + + public static boolean isRegionReplicaStoreFileRefreshEnabled(Configuration conf) { + return conf.getBoolean(REGION_REPLICA_STORE_FILE_REFRESH, + DEFAULT_REGION_REPLICA_STORE_FILE_REFRESH); + } + + public static double getRegionReplicaStoreFileRefreshMultiplier(Configuration conf) { + return conf.getDouble(REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER, + DEFAULT_REGION_REPLICA_STORE_FILE_REFRESH_MEMSTORE_MULTIPLIER); + } + /** * Return the peer id used for replicating to secondary region replicas */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java index efc141aec46..8cda518ac86 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java @@ -81,7 +81,7 @@ public class OfflineMetaRepair { for (int i = 0; i < args.length; i++) { String cmd = args[i]; if (cmd.equals("-details")) { - fsck.setDisplayFullReport(); + HBaseFsck.setDisplayFullReport(); } else if (cmd.equals("-base")) { if (i == args.length - 1) { System.err.println("OfflineMetaRepair: -base needs an HDFS path."); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java index 3e069d971a1..49594bc4bf7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java @@ -59,8 +59,10 @@ public class TableLockChecker { String msg = "Table lock acquire attempt found:"; if (data != null) { msg = msg + - String.format("[tableName=%s, lockOwner=%s, threadId=%s, " + - "purpose=%s, isShared=%s, createTime=%s]", Bytes.toString(data.getTableName().toByteArray()), + String.format("[tableName=%s:%s, lockOwner=%s, threadId=%s, " + + "purpose=%s, isShared=%s, createTime=%s]", + data.getTableName().getNamespace().toStringUtf8(), + 
data.getTableName().getQualifier().toStringUtf8(), ProtobufUtil.toServerName(data.getLockOwner()), data.getThreadId(), data.getPurpose(), data.getIsShared(), data.getCreateTime()); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java index 720cedcee3c..e579164d501 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.codehaus.jackson.map.ObjectMapper; +import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader; // imports for things that haven't moved yet. import org.apache.hadoop.hbase.regionserver.wal.WALEdit; @@ -242,11 +243,33 @@ public class WALPrettyPrinter { if (!fs.isFile(p)) { throw new IOException(p + " is not a file"); } + + WAL.Reader log = WALFactory.createReader(fs, p, conf); + + if (log instanceof ProtobufLogReader) { + List writerClsNames = ((ProtobufLogReader) log).getWriterClsNames(); + if (writerClsNames != null && writerClsNames.size() > 0) { + out.print("Writer Classes: "); + for (int i = 0; i < writerClsNames.size(); i++) { + out.print(writerClsNames.get(i)); + if (i != writerClsNames.size() - 1) { + out.print(" "); + } + } + out.println(); + } + + String cellCodecClsName = ((ProtobufLogReader) log).getCodecClsName(); + if (cellCodecClsName != null) { + out.println("Cell Codec Class: " + cellCodecClsName); + } + } + if (outputJSON && !persistentOutput) { out.print("["); firstTxn = true; } - WAL.Reader log = WALFactory.createReader(fs, p, conf); + try { WAL.Entry entry; while ((entry = log.next()) != null) { @@ -350,7 +373,7 @@ public class WALPrettyPrinter { options.addOption("j", "json", false, "Output JSON"); options.addOption("p", "printvals", false, "Print values"); options.addOption("r", "region", true, - "Region to filter by. Pass region name; e.g. 'hbase:meta,,1'"); + "Region to filter by. Pass encoded region name; e.g. '9192caead6a5a20acb4454ffbc79fa14'"); options.addOption("s", "sequence", true, "Sequence to filter by. Pass sequence number."); options.addOption("w", "row", true, "Row to filter by. 
Pass row name."); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index 4d8cc2dcc2b..517b67eb50b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -70,11 +70,12 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.TableState; @@ -92,11 +93,11 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoReque import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.LastSequenceId; // imports for things that haven't moved from regionserver.wal yet. @@ -326,7 +327,14 @@ public class WALSplitter { lastFlushedSequenceId = ids.getLastFlushedSequenceId(); } } else if (sequenceIdChecker != null) { - lastFlushedSequenceId = sequenceIdChecker.getLastSequenceId(region); + RegionStoreSequenceIds ids = sequenceIdChecker.getLastSequenceId(region); + Map maxSeqIdInStores = new TreeMap(Bytes.BYTES_COMPARATOR); + for (StoreSequenceId storeSeqId : ids.getStoreSequenceIdList()) { + maxSeqIdInStores.put(storeSeqId.getFamilyName().toByteArray(), + storeSeqId.getSequenceId()); + } + regionMaxSeqIdInStores.put(key, maxSeqIdInStores); + lastFlushedSequenceId = ids.getLastFlushedSequenceId(); } if (lastFlushedSequenceId == null) { lastFlushedSequenceId = -1L; @@ -1186,12 +1194,18 @@ public class WALSplitter { * @return true when there is no error * @throws IOException */ - protected boolean finishWriting() throws IOException { + protected boolean finishWriting(boolean interrupt) throws IOException { LOG.debug("Waiting for split writer threads to finish"); boolean progress_failed = false; for (WriterThread t : writerThreads) { t.finish(); } + if (interrupt) { + for (WriterThread t : writerThreads) { + t.interrupt(); // interrupt the writer threads. We are stopping now. 
+ } + } + for (WriterThread t : writerThreads) { if (!progress_failed && reporter != null && !reporter.progress()) { progress_failed = true; @@ -1260,7 +1274,7 @@ public class WALSplitter { boolean isSuccessful = false; List result = null; try { - isSuccessful = finishWriting(); + isSuccessful = finishWriting(false); } finally { result = close(); List thrown = closeLogWriters(null); @@ -1473,6 +1487,29 @@ public class WALSplitter { return (new WriterAndPath(regionedits, w)); } + private void filterCellByStore(Entry logEntry) { + Map maxSeqIdInStores = + regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName())); + if (maxSeqIdInStores == null || maxSeqIdInStores.isEmpty()) { + return; + } + List skippedCells = new ArrayList(); + for (Cell cell : logEntry.getEdit().getCells()) { + if (!CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) { + byte[] family = CellUtil.cloneFamily(cell); + Long maxSeqId = maxSeqIdInStores.get(family); + // Do not skip cell even if maxSeqId is null. Maybe we are in a rolling upgrade, + // or the master was crashed before and we can not get the information. + if (maxSeqId != null && maxSeqId.longValue() >= logEntry.getKey().getLogSeqNum()) { + skippedCells.add(cell); + } + } + } + if (!skippedCells.isEmpty()) { + logEntry.getEdit().getCells().removeAll(skippedCells); + } + } + @Override public void append(RegionEntryBuffer buffer) throws IOException { List entries = buffer.entryBuffer; @@ -1497,7 +1534,10 @@ public class WALSplitter { return; } } - wap.w.append(logEntry); + filterCellByStore(logEntry); + if (!logEntry.getEdit().isEmpty()) { + wap.w.append(logEntry); + } this.updateRegionMaximumEditLogSeqNum(logEntry); editsCount++; } @@ -1689,8 +1729,8 @@ public class WALSplitter { HConnection hconn = this.getConnectionByTableName(table); for (Cell cell : cells) { - byte[] row = cell.getRow(); - byte[] family = cell.getFamily(); + byte[] row = CellUtil.cloneRow(cell); + byte[] family = CellUtil.cloneFamily(cell); boolean isCompactionEntry = false; if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) { CompactionDescriptor compaction = WALEdit.getCompaction(cell); @@ -1960,7 +2000,7 @@ public class WALSplitter { @Override public List finishWritingAndClose() throws IOException { try { - if (!finishWriting()) { + if (!finishWriting(false)) { return null; } if (hasEditsInDisablingOrDisabledTables) { @@ -2101,7 +2141,7 @@ public class WALSplitter { synchronized (this.tableNameToHConnectionMap) { hconn = this.tableNameToHConnectionMap.get(tableName); if (hconn == null) { - hconn = HConnectionManager.getConnection(conf); + hconn = (HConnection) ConnectionFactory.createConnection(conf); this.tableNameToHConnectionMap.put(tableName, hconn); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java index 926cd10f403..fe8a17e6959 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java @@ -42,6 +42,8 @@ import org.apache.zookeeper.server.NIOServerCnxnFactory; import org.apache.zookeeper.server.ZooKeeperServer; import org.apache.zookeeper.server.persistence.FileTxnLog; +import com.google.common.annotations.VisibleForTesting; + /** * TODO: Most of the code in this class is ripped from ZooKeeper tests. 
Instead * of redoing it, we should contribute updates to their code which let us more @@ -82,6 +84,33 @@ public class MiniZooKeeperCluster { standaloneServerFactoryList = new ArrayList(); } + /** + * Add a client port to the list. + * + * @param clientPort the specified port + */ + public void addClientPort(int clientPort) { + clientPortList.add(clientPort); + } + + /** + * Get the list of client ports. + * @return clientPortList the client port list + */ + @VisibleForTesting + public List getClientPortList() { + return clientPortList; + } + + /** + * Check whether the client port in a specific position of the client port list is valid. + * + * @param index the specified position + */ + private boolean hasValidClientPortInList(int index) { + return (clientPortList.size() > index && clientPortList.get(index) > 0); + } + public void setDefaultClientPort(int clientPort) { if (clientPort <= 0) { throw new IllegalArgumentException("Invalid default ZK client port: " @@ -91,16 +120,39 @@ public class MiniZooKeeperCluster { } /** - * Selects a ZK client port. Returns the default port if specified. - * Otherwise, returns a random port. The random port is selected from the - * range between 49152 to 65535. These ports cannot be registered with IANA - * and are intended for dynamic allocation (see http://bit.ly/dynports). + * Selects a ZK client port. + * + * @param seedPort the seed port to start with; -1 means first time. + * @Returns a valid and unused client port */ - private int selectClientPort() { - if (defaultClientPort > 0) { - return defaultClientPort; + private int selectClientPort(int seedPort) { + int i; + int returnClientPort = seedPort + 1; + if (returnClientPort == 0) { + // If the new port is invalid, find one - starting with the default client port. + // If the default client port is not specified, starting with a random port. + // The random port is selected from the range between 49152 to 65535. These ports cannot be + // registered with IANA and are intended for dynamic allocation (see http://bit.ly/dynports). + if (defaultClientPort > 0) { + returnClientPort = defaultClientPort; + } else { + returnClientPort = 0xc000 + new Random().nextInt(0x3f00); + } } - return 0xc000 + new Random().nextInt(0x3f00); + // Make sure that the port is unused. + while (true) { + for (i = 0; i < clientPortList.size(); i++) { + if (returnClientPort == clientPortList.get(i)) { + // Already used. Update the port and retry. + returnClientPort++; + break; + } + } + if (i == clientPortList.size()) { + break; // found a unused port, exit + } + } + return returnClientPort; } public void setTickTime(int tickTime) { @@ -126,7 +178,11 @@ public class MiniZooKeeperCluster { } public int startup(File baseDir) throws IOException, InterruptedException { - return startup(baseDir,1); + int numZooKeeperServers = clientPortList.size(); + if (numZooKeeperServers == 0) { + numZooKeeperServers = 1; // need at least 1 ZK server for testing + } + return startup(baseDir, numZooKeeperServers); } /** @@ -145,7 +201,8 @@ public class MiniZooKeeperCluster { setupTestEnv(); shutdown(); - int tentativePort = selectClientPort(); + int tentativePort = -1; // the seed port + int currentClientPort; // running all the ZK servers for (int i = 0; i < numZooKeeperServers; i++) { @@ -157,21 +214,33 @@ public class MiniZooKeeperCluster { } else { tickTimeToUse = TICK_TIME; } + + // Set up client port - if we have already had a list of valid ports, use it. 
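With addClientPort and getClientPortList above, a test can pin the ZooKeeper client ports before startup instead of relying on random selection, and startup(baseDir) now launches one server per configured port. A hedged usage sketch; the ports, directory and class name are illustrative.

  import java.io.File;
  import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;

  public class MiniZkPortsSketch {
    public static void main(String[] args) throws Exception {
      MiniZooKeeperCluster cluster = new MiniZooKeeperCluster();
      cluster.addClientPort(21818);   // one entry per desired ZooKeeper server
      cluster.addClientPort(21819);
      cluster.startup(new File("/tmp/minizk-example"));  // starts two servers here
      System.out.println("client ports: " + cluster.getClientPortList());
      cluster.shutdown();
    }
  }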
+ if (hasValidClientPortInList(i)) { + currentClientPort = clientPortList.get(i); + } else { + tentativePort = selectClientPort(tentativePort); // update the seed + currentClientPort = tentativePort; + } + ZooKeeperServer server = new ZooKeeperServer(dir, dir, tickTimeToUse); NIOServerCnxnFactory standaloneServerFactory; while (true) { try { standaloneServerFactory = new NIOServerCnxnFactory(); standaloneServerFactory.configure( - new InetSocketAddress(tentativePort), + new InetSocketAddress(currentClientPort), configuration.getInt(HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS, 1000)); } catch (BindException e) { LOG.debug("Failed binding ZK Server to client port: " + - tentativePort, e); + currentClientPort, e); // We're told to use some port but it's occupied, fail - if (defaultClientPort > 0) return -1; + if (hasValidClientPortInList(i)) { + return -1; + } // This port is already in use, try to use another. - tentativePort = selectClientPort(); + tentativePort = selectClientPort(tentativePort); + currentClientPort = tentativePort; continue; } break; @@ -180,15 +249,21 @@ public class MiniZooKeeperCluster { // Start up this ZK server standaloneServerFactory.startup(server); // Runs a 'stat' against the servers. - if (!waitForServerUp(tentativePort, CONNECTION_TIMEOUT)) { + if (!waitForServerUp(currentClientPort, CONNECTION_TIMEOUT)) { throw new IOException("Waiting for startup of standalone server"); } - // We have selected this port as a client port. - clientPortList.add(tentativePort); + // We have selected a port as a client port. Update clientPortList if necessary. + if (clientPortList.size() <= i) { // it is not in the list, add the port + clientPortList.add(currentClientPort); + } + else if (clientPortList.get(i) <= 0) { // the list has invalid port, update with valid port + clientPortList.remove(i); + clientPortList.add(i, currentClientPort); + } + standaloneServerFactoryList.add(standaloneServerFactory); zooKeeperServers.add(server); - tentativePort++; //for the next server } // set the first one to be active ZK; Others are backups @@ -251,7 +326,7 @@ public class MiniZooKeeperCluster { */ public int killCurrentActiveZooKeeperServer() throws IOException, InterruptedException { - if (!started || activeZKServerIndex < 0 ) { + if (!started || activeZKServerIndex < 0) { return -1; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoveringRegionWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoveringRegionWatcher.java index a07bd2f6c81..5fff9d22600 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoveringRegionWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoveringRegionWatcher.java @@ -21,8 +21,8 @@ package org.apache.hadoop.hbase.zookeeper; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.handler.FinishRegionRecoveringHandler; import org.apache.zookeeper.KeeperException; /** @@ -33,7 +33,7 @@ public class RecoveringRegionWatcher extends ZooKeeperListener { private static final Log LOG = LogFactory.getLog(RecoveringRegionWatcher.class); private HRegionServer server; - + /** * Construct a ZooKeeper event listener. 
*/ @@ -47,6 +47,7 @@ public class RecoveringRegionWatcher extends ZooKeeperListener { * Called when a node has been deleted * @param path full path of the deleted node */ + @Override public void nodeDeleted(String path) { if (this.server.isStopped() || this.server.isStopping()) { return; @@ -58,12 +59,8 @@ public class RecoveringRegionWatcher extends ZooKeeperListener { } String regionName = path.substring(parentPath.length() + 1); - HRegion region = this.server.getRecoveringRegions().remove(regionName); - if (region != null) { - region.setRecovering(false); - } - LOG.info(path + " deleted; " + regionName + " recovered."); + server.getExecutorService().submit(new FinishRegionRecoveringHandler(server, regionName, path)); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java index 78b3eed0864..fb769c02270 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java @@ -26,12 +26,12 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.zookeeper.KeeperException; /** @@ -78,6 +78,19 @@ public class ZKSplitLog { return ZKUtil.joinZNode(zkw.splitLogZNode, "RESCAN"); } + /** + * @param name the last part in path + * @return whether the node name represents a rescan node + */ + public static boolean isRescanNode(String name) { + return name.startsWith("RESCAN"); + } + + /** + * @param zkw + * @param path the absolute path, starts with '/' + * @return whether the path represents a rescan node + */ public static boolean isRescanNode(ZooKeeperWatcher zkw, String path) { String prefix = getRescanNode(zkw); if (path.length() <= prefix.length()) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMainServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMainServer.java index 86348a3e103..e81da596a20 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMainServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperMainServer.java @@ -20,10 +20,6 @@ package org.apache.hadoop.hbase.zookeeper; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map.Entry; -import java.util.Properties; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -41,31 +37,7 @@ public class ZooKeeperMainServer { private static final String SERVER_ARG = "-server"; public String parse(final Configuration c) { - // Note that we do not simply grab the property - // HConstants.ZOOKEEPER_QUORUM from the HBaseConfiguration because the - // user may be using a zoo.cfg file. 
- Properties zkProps = ZKConfig.makeZKProps(c); - String clientPort = null; - List hosts = new ArrayList(); - for (Entry entry: zkProps.entrySet()) { - String key = entry.getKey().toString().trim(); - String value = entry.getValue().toString().trim(); - if (key.startsWith("server.")) { - String[] parts = value.split(":"); - hosts.add(parts[0]); - } else if (key.endsWith("clientPort")) { - clientPort = value; - } - } - if (hosts.isEmpty() || clientPort == null) return null; - StringBuilder host = new StringBuilder(); - for (int i = 0; i < hosts.size(); i++) { - if (i > 0) host.append("," + hosts.get(i)); - else host.append(hosts.get(i)); - host.append(":"); - host.append(clientPort); - } - return host.toString(); + return ZKConfig.getZKQuorumServersString(c); } /** diff --git a/hbase-server/src/main/javadoc/org/apache/hadoop/hbase/replication/package.html b/hbase-server/src/main/javadoc/org/apache/hadoop/hbase/replication/package.html index 2f2e24a887f..8a42139c45e 100644 --- a/hbase-server/src/main/javadoc/org/apache/hadoop/hbase/replication/package.html +++ b/hbase-server/src/main/javadoc/org/apache/hadoop/hbase/replication/package.html @@ -22,144 +22,6 @@

      Multi Cluster Replication

      -This package provides replication between HBase clusters. -

      - -

      Table Of Contents

      -
        -
      1. Status
      2. -
      3. Requirements
      4. -
      5. Deployment
      6. -
      7. Verifying Replicated Data
      8. -
      - -

      - -

      Status

      - -

      -This package is experimental quality software and is only meant to be a base -for future developments. The current implementation offers the following -features: - -

        -
      1. Master/Slave replication.
      2. -
      3. Master/Master replication.
      4. -
      5. Cyclic replication.
      6. -
      7. Replication of scoped families in user tables.
      8. -
      9. Start/stop replication stream.
      10. -
      11. Supports clusters of different sizes.
      12. -
      13. Handling of partitions longer than 10 minutes.
      14. -
      15. Ability to add/remove slave clusters at runtime.
      16. -
      17. MapReduce job to compare tables on two clusters
      18. -
      -Please report bugs on the project's Jira when found. -

      - -

      Requirements

      - -

      - -Before trying out replication, make sure to review the following requirements: - -

        -
      1. Zookeeper should be handled by yourself, not by HBase, and should - always be available during the deployment.
      2. -
      3. All machines from both clusters should be able to reach every - other machine since replication goes from any region server to any - other one on the slave cluster. That also includes the - Zookeeper clusters.
      4. -
      5. Both clusters should have the same HBase and Hadoop major revision. - For example, having 0.90.1 on the master and 0.90.0 on the slave is - correct but not 0.90.1 and 0.89.20100725.
      6. -
      7. Every table that contains families that are scoped for replication - should exist on every cluster with the exact same name, same for those - replicated families.
      8. -
      9. For multiple slaves, Master/Master, or cyclic replication version - 0.92 or greater is needed.
      10. -
      - -

      - -

      Deployment

      - -

      - -The following steps describe how to enable replication from a cluster -to another. -

        -
      1. Edit ${HBASE_HOME}/conf/hbase-site.xml on both cluster to add - the following configurations: -
        -<property>
        -  <name>hbase.replication</name>
        -  <value>true</value>
        -</property>
        - deploy the files, and then restart HBase if it was running. -
      2. -
      3. Run the following command in the master's shell while it's running -
        add_peer 'ID' 'CLUSTER_KEY'
        - The ID is a string, which must not contain a hyphen. To compose the CLUSTER_KEY, use the following template: -
        hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent
        - This will show you the help to setup the replication stream between - both clusters. If both clusters use the same Zookeeper cluster, you have - to use a different zookeeper.znode.parent since they can't - write in the same folder. -
      4. -
      5. - Once you have a peer, you need to enable replication on your column families. - One way to do it is to alter the table and to set the scope like this: -
        -      disable 'your_table'
        -      alter 'your_table', {NAME => 'family_name', REPLICATION_SCOPE => '1'}
        -      enable 'your_table'
        -    
        - Currently, a scope of 0 (default) means that it won't be replicated and a - scope of 1 means it's going to be. In the future, different scope can be - used for routing policies. -
      6. -
      7. To list all configured peers run the following command in the master's - shell -
        list_peers
        (as of version 0.92) -
      8. -
      9. To enable a peer that was previously disabled, run the following command in the master's shell. -
        enable_peer 'ID'
        -
      10. -
      11. To disable a peer, run the following command in the master's shell. This setting causes - HBase to stop sending the edits to that peer cluster, but it still keeps track of all the - new WALs that it will need to replicate if and when it is re-enabled. -
        disable_peer 'ID'
        -
      12. -
      13. To remove a peer, use the following command in the master's shell. -
        remove_peer 'ID'
        -
      14. -
      - -You can confirm that your setup works by looking at any region server's log -on the master cluster and look for the following lines; - -
      -Considering 1 rs, with ratio 0.1
      -Getting 1 rs from peer cluster # 0
      -Choosing peer 10.10.1.49:62020
      - -In this case it indicates that 1 region server from the slave cluster -was chosen for replication.

      -

      - - -

      Verifying Replicated Data

      - -

      -Verifying the replicated data on two clusters is easy to do in the shell when -looking only at a few rows, but doing a systematic comparison requires more -computing power. This is why the VerifyReplication MR job was created, it has -to be run on the master cluster and needs to be provided with a peer id (the -one provided when establishing a replication stream) and a table name. Other -options let you specify a time range and specific families. This job's short -name is "verifyrep" and needs to be provided when pointing "hadoop jar" to the -hbase jar. -

      - +

      Cluster replication documentation has been moved to the link:http://hbase.apache.org/book.html#_cluster_replication[Cluster Replication] section of the link:http://hbase.apache.org/book.html[Apache HBase Reference Guide].
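      The walkthrough removed above (add_peer 'ID' 'CLUSTER_KEY' with a key of the form hbase.zookeeper.quorum:clientPort:znode.parent, then REPLICATION_SCOPE => '1' on each family, and the "verifyrep" MapReduce job for checking the copies) now lives in the reference guide section linked above. Purely as an illustration of the same steps through the Java client API of this era: the peer id, cluster key, table, and family names below are placeholders, and ReplicationAdmin#addPeer(String, String) is assumed to already exist on this branch (it is not part of this patch).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReplicationSetupSketch {
      public static void main(String[] args) throws Exception {
        // hbase.replication=true is assumed to be set in hbase-site.xml on both clusters.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Equivalent of: add_peer '1', 'slave-zk1,slave-zk2,slave-zk3:2181:/hbase'
          ReplicationAdmin replication = new ReplicationAdmin(conf);
          replication.addPeer("1", "slave-zk1,slave-zk2,slave-zk3:2181:/hbase");

          // Equivalent of: alter 'your_table', {NAME => 'family_name', REPLICATION_SCOPE => '1'}
          TableName table = TableName.valueOf("your_table");
          HTableDescriptor desc = admin.getTableDescriptor(table);
          desc.getFamily(Bytes.toBytes("family_name")).setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
          admin.disableTable(table);
          admin.modifyTable(table, desc);
          admin.enableTable(table);
          replication.close();
        }
      }
    }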

      diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp index 831835e8d10..50a75601ccb 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp @@ -21,7 +21,6 @@ import="java.util.Date" import="org.apache.hadoop.conf.Configuration" import="org.apache.hadoop.hbase.client.Admin" - import="org.apache.hadoop.hbase.client.HConnectionManager" import="org.apache.hadoop.hbase.master.HMaster" import="org.apache.hadoop.hbase.snapshot.SnapshotInfo" import="org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription" diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index 58e5da44d70..110330097fe 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -43,15 +43,9 @@ MetaTableLocator metaTableLocator = new MetaTableLocator(); String fqtn = request.getParameter("name"); - HTable table = (HTable) master.getConnection().getTable(fqtn); + HTable table = null; String tableHeader; boolean withReplica = false; - if (table.getTableDescriptor().getRegionReplication() > 1) { - tableHeader = "

      Table Regions

      "; - withReplica = true; - } else { - tableHeader = "

      Table Regions

      NameRegion ServerStart KeyEnd KeyLocalityRequestsReplicaID
      "; - } ServerName rl = metaTableLocator.getMetaRegionLocation(master.getZooKeeper()); boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false); boolean readOnly = conf.getBoolean("hbase.master.ui.readonly", false); @@ -61,21 +55,21 @@ if (showFragmentation) { frags = FSUtils.getTableFragmentation(master); } + String action = request.getParameter("action"); + String key = request.getParameter("key"); %> - -<% - String action = request.getParameter("action"); - String key = request.getParameter("key"); - if ( !readOnly && action != null ) { -%> - HBase Master: <%= master.getServerName() %> + <% if ( !readOnly && action != null ) { %> + HBase Master: <%= master.getServerName() %> + <% } else { %> + Table: <%= fqtn %> + <% } %> @@ -84,11 +78,17 @@ + <% if ( ( !readOnly && action != null ) || fqtn == null ) { %> + <% } else { %> + + <% } %> +<% +if ( fqtn != null ) { + table = (HTable) master.getConnection().getTable(fqtn); + if (table.getTableDescriptor().getRegionReplication() > 1) { + tableHeader = "

      Table Regions

      NameRegion ServerStart KeyEnd KeyLocalityRequests
      "; + withReplica = true; + } else { + tableHeader = "

      Table Regions

      NameRegion ServerStart KeyEnd KeyLocalityRequestsReplicaID
      "; + } + if ( !readOnly && action != null ) { +%>
      @@ -147,50 +158,9 @@ %>

      Go Back, or wait for the redirect.

      - - - <% -} else { + } else { %> - - - Table: <%= fqtn %> - - - - - - - - - - - -
      @@ -387,12 +357,22 @@ Actions:
      NameRegion ServerStart KeyEnd KeyLocalityRequests

      +<% } %> +<% } +} else { // handle the case for fqtn is null with error message + redirect +%> +
      +
      + +
      +


      +

      Go Back, or wait for the redirect. +

      <% } %> -<% -} -%> diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java index e4dc09e7152..0d5b27ef76a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; @@ -230,7 +231,7 @@ public abstract class HBaseTestCase extends TestCase { * @throws IOException * @return count of what we added. */ - public static long addContent(final HRegion r, final byte [] columnFamily, final byte[] column) + public static long addContent(final Region r, final byte [] columnFamily, final byte[] column) throws IOException { byte [] startKey = r.getRegionInfo().getStartKey(); byte [] endKey = r.getRegionInfo().getEndKey(); @@ -242,8 +243,7 @@ public abstract class HBaseTestCase extends TestCase { startKeyBytes, endKey, -1); } - public static long addContent(final HRegion r, final byte [] columnFamily) - throws IOException { + public static long addContent(final Region r, final byte [] columnFamily) throws IOException { return addContent(r, columnFamily, null); } @@ -439,6 +439,10 @@ public abstract class HBaseTestCase extends TestCase { this.region = HRegion; } + public HRegionIncommon(final Region region) { + this.region = (HRegion)region; + } + public void put(Put put) throws IOException { region.put(put); } @@ -469,7 +473,7 @@ public abstract class HBaseTestCase extends TestCase { } public void flushcache() throws IOException { - this.region.flushcache(); + this.region.flush(true); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 1a377fcae9c..9cb0d57c397 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -18,11 +18,6 @@ package org.apache.hadoop.hbase; import javax.annotation.Nullable; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - import java.io.File; import java.io.IOException; import java.io.OutputStream; @@ -63,6 +58,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Consistency; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; @@ -94,11 +90,13 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.InternalScanner; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; 
import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.visibility.VisibilityLabelsCache; import org.apache.hadoop.hbase.tool.Canary; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSTableDescriptors; @@ -127,6 +125,10 @@ import org.apache.zookeeper.WatchedEvent; import org.apache.zookeeper.ZooKeeper; import org.apache.zookeeper.ZooKeeper.States; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + /** * Facility for testing HBase. Replacement for * old HBaseTestCase and HBaseClusterTestCase functionality. @@ -285,6 +287,13 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { return htu; } + /** + * Close both the region {@code r} and it's underlying WAL. For use in tests. + */ + public static void closeRegionAndWAL(final Region r) throws IOException { + closeRegionAndWAL((HRegion)r); + } + /** * Close both the HRegion {@code r} and it's underlying WAL. For use in tests. */ @@ -710,15 +719,17 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * @see #shutdownMiniZKCluster() * @return zk cluster started. */ - public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum) + public MiniZooKeeperCluster startMiniZKCluster( + final int zooKeeperServerNum, + final int ... clientPortList) throws Exception { setupClusterTestDir(); - return startMiniZKCluster(clusterTestDir, zooKeeperServerNum); + return startMiniZKCluster(clusterTestDir, zooKeeperServerNum, clientPortList); } private MiniZooKeeperCluster startMiniZKCluster(final File dir) throws Exception { - return startMiniZKCluster(dir,1); + return startMiniZKCluster(dir, 1, null); } /** @@ -726,7 +737,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * the port mentionned is used as the default port for ZooKeeper. */ private MiniZooKeeperCluster startMiniZKCluster(final File dir, - int zooKeeperServerNum) + final int zooKeeperServerNum, + final int [] clientPortList) throws Exception { if (this.zkCluster != null) { throw new IOException("Cluster already running at " + dir); @@ -738,6 +750,15 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { // If there is a port in the config file, we use it. this.zkCluster.setDefaultClientPort(defPort); } + + if (clientPortList != null) { + // Ignore extra client ports + int clientPortListSize = (clientPortList.length <= zooKeeperServerNum) ? 
+ clientPortList.length : zooKeeperServerNum; + for (int i=0; i < clientPortListSize; i++) { + this.zkCluster.addClientPort(clientPortList[i]); + } + } int clientPort = this.zkCluster.startup(dir,zooKeeperServerNum); this.conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(clientPort)); @@ -1021,7 +1042,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } this.hbaseCluster = new MiniHBaseCluster(this.conf, servers); // Don't leave here till we've done a successful scan of the hbase:meta - Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME); + Connection conn = ConnectionFactory.createConnection(this.conf); + Table t = conn.getTable(TableName.META_TABLE_NAME); ResultScanner s = t.getScanner(new Scan()); while (s.next() != null) { // do nothing @@ -1029,6 +1051,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { LOG.info("HBase has been restarted"); s.close(); t.close(); + conn.close(); } /** @@ -1737,7 +1760,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { setFirst(0); setSecond(0); }}; - for (int i = 0; status.getFirst() != 0 && i < 500; i++) { // wait up to 500 seconds + int i = 0; + do { status = admin.getAlterStatus(desc.getTableName()); if (status.getSecond() != 0) { LOG.debug(status.getSecond() - status.getFirst() + "/" + status.getSecond() @@ -1747,9 +1771,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { LOG.debug("All regions updated."); break; } - } - if (status.getSecond() != 0) { - throw new IOException("Failed to update replica count after 500 seconds."); + } while (status.getFirst() != 0 && i++ < 500); + if (status.getFirst() != 0) { + throw new IOException("Failed to update all regions even after 500 seconds."); } } @@ -1761,7 +1785,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { admin.disableTable(table); HTableDescriptor desc = admin.getTableDescriptor(table); desc.setRegionReplication(replicaCount); - modifyTableSync(admin, desc); + admin.modifyTable(desc.getTableName(), desc); admin.enableTable(table); } @@ -2115,6 +2139,10 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { return loadRegion(r, f, false); } + public int loadRegion(final Region r, final byte[] f) throws IOException { + return loadRegion((HRegion)r, f); + } + /** * Load region with rows from 'aaa' to 'zzz'. 
* @param r Region @@ -2136,8 +2164,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { Put put = new Put(k); put.setDurability(Durability.SKIP_WAL); put.add(f, null, k); - if (r.getWAL() == null) put.setDurability(Durability.SKIP_WAL); - + if (r.getWAL() == null) { + put.setDurability(Durability.SKIP_WAL); + } int preRowCount = rowCount; int pause = 10; int maxPause = 1000; @@ -2153,7 +2182,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } } if (flush) { - r.flushcache(); + r.flush(true); } } return rowCount; @@ -2169,12 +2198,51 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } } - public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow) + public void verifyNumericRows(Table table, final byte[] f, int startRow, int endRow, + int replicaId) throws IOException { + for (int i = startRow; i < endRow; i++) { + String failMsg = "Failed verification of row :" + i; + byte[] data = Bytes.toBytes(String.valueOf(i)); + Get get = new Get(data); + get.setReplicaId(replicaId); + get.setConsistency(Consistency.TIMELINE); + Result result = table.get(get); + assertTrue(failMsg, result.containsColumn(f, null)); + assertEquals(failMsg, result.getColumnCells(f, null).size(), 1); + Cell cell = result.getColumnLatestCell(f, null); + assertTrue(failMsg, + Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(), + cell.getValueLength())); + } + } + + public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow) + throws IOException { + verifyNumericRows((HRegion)region, f, startRow, endRow); + } + + public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow) + throws IOException { + verifyNumericRows(region, f, startRow, endRow, true); + } + + public void verifyNumericRows(Region region, final byte[] f, int startRow, int endRow, + final boolean present) throws IOException { + verifyNumericRows((HRegion)region, f, startRow, endRow, present); + } + + public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow, + final boolean present) throws IOException { for (int i = startRow; i < endRow; i++) { String failMsg = "Failed verification of row :" + i; byte[] data = Bytes.toBytes(String.valueOf(i)); Result result = region.get(new Get(data)); + + boolean hasResult = result != null && !result.isEmpty(); + assertEquals(failMsg + result, present, hasResult); + if (!present) continue; + assertTrue(failMsg, result.containsColumn(f, null)); assertEquals(failMsg, result.getColumnCells(f, null).size(), 1); Cell cell = result.getColumnLatestCell(f, null); @@ -2222,6 +2290,18 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { return count; } + /** + * Return the number of rows in the given table. + */ + public int countRows(final TableName tableName) throws IOException { + Table table = getConnection().getTable(tableName); + try { + return countRows(table); + } finally { + table.close(); + } + } + /** * Return an md5 digest of the entire contents of a table. 
*/ @@ -2982,7 +3062,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } }; MetaTableAccessor - .fullScan(connection, visitor, table.getName(), MetaTableAccessor.QueryType.TABLE, true); + .scanMeta(connection, null, null, + MetaTableAccessor.QueryType.TABLE, + Integer.MAX_VALUE, visitor); return lastTableState.get(); } @@ -3571,6 +3653,29 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { return createPreSplitLoadTestTable(conf, desc, hcd, numRegionsPerServer); } + /** + * Creates a pre-split table for load testing. If the table already exists, + * logs a warning and continues. + * @return the number of regions the table was split into + */ + public static int createPreSplitLoadTestTable(Configuration conf, + TableName tableName, byte[][] columnFamilies, Algorithm compression, + DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication, + Durability durability) + throws IOException { + HTableDescriptor desc = new HTableDescriptor(tableName); + desc.setDurability(durability); + desc.setRegionReplication(regionReplication); + HColumnDescriptor[] hcds = new HColumnDescriptor[columnFamilies.length]; + for (int i = 0; i < columnFamilies.length; i++) { + HColumnDescriptor hcd = new HColumnDescriptor(columnFamilies[i]); + hcd.setDataBlockEncoding(dataBlockEncoding); + hcd.setCompressionType(compression); + hcds[i] = hcd; + } + return createPreSplitLoadTestTable(conf, desc, hcds, numRegionsPerServer); + } + /** * Creates a pre-split table for load testing. If the table already exists, * logs a warning and continues. @@ -3588,8 +3693,21 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public static int createPreSplitLoadTestTable(Configuration conf, HTableDescriptor desc, HColumnDescriptor hcd, int numRegionsPerServer) throws IOException { - if (!desc.hasFamily(hcd.getName())) { - desc.addFamily(hcd); + return createPreSplitLoadTestTable(conf, desc, new HColumnDescriptor[] {hcd}, + numRegionsPerServer); + } + + /** + * Creates a pre-split table for load testing. If the table already exists, + * logs a warning and continues. + * @return the number of regions the table was split into + */ + public static int createPreSplitLoadTestTable(Configuration conf, + HTableDescriptor desc, HColumnDescriptor[] hcds, int numRegionsPerServer) throws IOException { + for (HColumnDescriptor hcd : hcds) { + if (!desc.hasFamily(hcd.getName())) { + desc.addFamily(hcd); + } } int totalNumberOfRegions = 0; @@ -3672,10 +3790,10 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { if (server.equals(rs.getServerName())) { continue; } - Collection hrs = rs.getOnlineRegionsLocalContext(); - for (HRegion r: hrs) { + Collection hrs = rs.getOnlineRegionsLocalContext(); + for (Region r: hrs) { assertTrue("Region should not be double assigned", - r.getRegionId() != hri.getRegionId()); + r.getRegionInfo().getRegionId() != hri.getRegionId()); } } return; // good, we are happy @@ -3815,6 +3933,37 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { waitFor(timeout, predicateNoRegionsInTransition()); } + /** + * Wait until labels is ready in VisibilityLabelsCache. + * @param timeoutMillis + * @param labels + */ + public void waitLabelAvailable(long timeoutMillis, final String... 
labels) { + final VisibilityLabelsCache labelsCache = VisibilityLabelsCache.get(); + waitFor(timeoutMillis, new Waiter.ExplainingPredicate() { + + @Override + public boolean evaluate() { + for (String label : labels) { + if (labelsCache.getLabelOrdinal(label) == 0) { + return false; + } + } + return true; + } + + @Override + public String explainFailure() { + for (String label : labels) { + if (labelsCache.getLabelOrdinal(label) == 0) { + return label + " is not available yet"; + } + } + return ""; + } + }); + } + /** * Create a set of column descriptors with the combination of compression, * encoding, bloom codecs available. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java index ea10f602151..cb12bea1e2d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.crypto.Encryption; import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting; import org.apache.hadoop.hbase.io.crypto.aes.AES; -import org.apache.hadoop.hbase.io.hfile.AbstractHFileWriter; +import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileContext; @@ -326,7 +326,7 @@ public class HFilePerformanceEvaluation { void setUp() throws Exception { HFileContextBuilder builder = new HFileContextBuilder() - .withCompression(AbstractHFileWriter.compressionByName(codec)) + .withCompression(HFileWriterImpl.compressionByName(codec)) .withBlockSize(RFILE_BLOCKSIZE); if (cipher == "aes") { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index 24b6e714dbb..4a02b043ae4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.test.MetricsAssertHelper; import org.apache.hadoop.hbase.util.JVMClusterUtil; @@ -535,8 +536,8 @@ public class MiniHBaseCluster extends HBaseCluster { public void flushcache() throws IOException { for (JVMClusterUtil.RegionServerThread t: this.hbaseCluster.getRegionServers()) { - for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) { - r.flushcache(); + for(Region r: t.getRegionServer().getOnlineRegionsLocalContext()) { + r.flush(true); } } } @@ -548,9 +549,9 @@ public class MiniHBaseCluster extends HBaseCluster { public void flushcache(TableName tableName) throws IOException { for (JVMClusterUtil.RegionServerThread t: this.hbaseCluster.getRegionServers()) { - for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) { + for(Region r: t.getRegionServer().getOnlineRegionsLocalContext()) { if(r.getTableDesc().getTableName().equals(tableName)) 
{ - r.flushcache(); + r.flush(true); } } } @@ -563,8 +564,8 @@ public class MiniHBaseCluster extends HBaseCluster { public void compact(boolean major) throws IOException { for (JVMClusterUtil.RegionServerThread t: this.hbaseCluster.getRegionServers()) { - for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) { - r.compactStores(major); + for(Region r: t.getRegionServer().getOnlineRegionsLocalContext()) { + r.compact(major); } } } @@ -576,9 +577,9 @@ public class MiniHBaseCluster extends HBaseCluster { public void compact(TableName tableName, boolean major) throws IOException { for (JVMClusterUtil.RegionServerThread t: this.hbaseCluster.getRegionServers()) { - for(HRegion r: t.getRegionServer().getOnlineRegionsLocalContext()) { + for(Region r: t.getRegionServer().getOnlineRegionsLocalContext()) { if(r.getTableDesc().getTableName().equals(tableName)) { - r.compactStores(major); + r.compact(major); } } } @@ -615,9 +616,9 @@ public class MiniHBaseCluster extends HBaseCluster { List ret = new ArrayList(); for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) { HRegionServer hrs = rst.getRegionServer(); - for (HRegion region : hrs.getOnlineRegionsLocalContext()) { + for (Region region : hrs.getOnlineRegionsLocalContext()) { if (region.getTableDesc().getTableName().equals(tableName)) { - ret.add(region); + ret.add((HRegion)region); } } } @@ -643,8 +644,7 @@ public class MiniHBaseCluster extends HBaseCluster { int count = 0; for (JVMClusterUtil.RegionServerThread rst: getRegionServerThreads()) { HRegionServer hrs = rst.getRegionServer(); - HRegion metaRegion = - hrs.getOnlineRegion(regionName); + Region metaRegion = hrs.getOnlineRegion(regionName); if (metaRegion != null) { index = count; break; @@ -662,7 +662,7 @@ public class MiniHBaseCluster extends HBaseCluster { // should hold some regions. Please refer to #countServedRegions // to see how we find out all regions. 
HMaster master = getMaster(); - HRegion region = master.getOnlineRegion(regionName); + Region region = master.getOnlineRegion(regionName); if (region != null) { return master.getServerName(); } @@ -712,9 +712,9 @@ public class MiniHBaseCluster extends HBaseCluster { ArrayList ret = new ArrayList(); for (JVMClusterUtil.RegionServerThread rst : getRegionServerThreads()) { HRegionServer hrs = rst.getRegionServer(); - for (HRegion region : hrs.getOnlineRegions(tableName)) { + for (Region region : hrs.getOnlineRegions(tableName)) { if (region.getTableDesc().getTableName().equals(tableName)) { - ret.add(region); + ret.add((HRegion)region); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java index 6d0a3c7caa4..810ab9089f4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java @@ -40,9 +40,9 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Regio import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager; import org.apache.hadoop.hbase.regionserver.CompactionRequestor; import org.apache.hadoop.hbase.regionserver.FlushRequester; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HeapMemoryManager; import org.apache.hadoop.hbase.regionserver.Leases; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerAccounting; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.ServerNonceManager; @@ -59,7 +59,7 @@ import com.google.protobuf.Service; */ public class MockRegionServerServices implements RegionServerServices { protected static final Log LOG = LogFactory.getLog(MockRegionServerServices.class); - private final Map regions = new HashMap(); + private final Map regions = new HashMap(); private final ConcurrentSkipListMap rit = new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR); private HFileSystem hfs = null; @@ -90,17 +90,17 @@ public class MockRegionServerServices implements RegionServerServices { } @Override - public boolean removeFromOnlineRegions(HRegion r, ServerName destination) { + public boolean removeFromOnlineRegions(Region r, ServerName destination) { return this.regions.remove(r.getRegionInfo().getEncodedName()) != null; } @Override - public HRegion getFromOnlineRegions(String encodedRegionName) { + public Region getFromOnlineRegions(String encodedRegionName) { return this.regions.get(encodedRegionName); } @Override - public List getOnlineRegions(TableName tableName) throws IOException { + public List getOnlineRegions(TableName tableName) throws IOException { return null; } @@ -110,13 +110,12 @@ public class MockRegionServerServices implements RegionServerServices { } @Override - public void addToOnlineRegions(HRegion r) { + public void addToOnlineRegions(Region r) { this.regions.put(r.getRegionInfo().getEncodedName(), r); } @Override - public void postOpenDeployTasks(HRegion r) - throws KeeperException, IOException { + public void postOpenDeployTasks(Region r) throws KeeperException, IOException { addToOnlineRegions(r); } @@ -258,7 +257,7 @@ public class MockRegionServerServices implements RegionServerServices { } @Override - public Map getRecoveringRegions() { + public Map getRecoveringRegions() { // TODO Auto-generated method stub return null; } 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index 2e7afa52cc8..eb2f46885b1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -124,7 +124,8 @@ public class PerformanceEvaluation extends Configured implements Tool { public static final String TABLE_NAME = "TestTable"; public static final byte[] FAMILY_NAME = Bytes.toBytes("info"); - public static final byte[] QUALIFIER_NAME = Bytes.toBytes("data"); + public static final byte [] COLUMN_ZERO = Bytes.toBytes("" + 0); + public static final byte [] QUALIFIER_NAME = COLUMN_ZERO; public static final int DEFAULT_VALUE_LENGTH = 1000; public static final int ROW_LENGTH = 26; @@ -610,6 +611,9 @@ public class PerformanceEvaluation extends Configured implements Tool { int valueSize = DEFAULT_VALUE_LENGTH; int period = (this.perClientRunRows / 10) == 0? perClientRunRows: perClientRunRows / 10; int cycles = 1; + int columns = 1; + int caching = 30; + boolean addColumns = true; public TestOptions() {} @@ -651,6 +655,25 @@ public class PerformanceEvaluation extends Configured implements Tool { this.period = that.period; this.randomSleep = that.randomSleep; this.measureAfter = that.measureAfter; + this.addColumns = that.addColumns; + this.columns = that.columns; + this.caching = that.caching; + } + + public int getCaching() { + return this.caching; + } + + public void setCaching(final int caching) { + this.caching = caching; + } + + public int getColumns() { + return this.columns; + } + + public void setColumns(final int columns) { + this.columns = columns; } public int getCycles() { @@ -916,6 +939,14 @@ public class PerformanceEvaluation extends Configured implements Tool { public void setMeasureAfter(int measureAfter) { this.measureAfter = measureAfter; } + + public boolean getAddColumns() { + return addColumns; + } + + public void setAddColumns(boolean addColumns) { + this.addColumns = addColumns; + } } /* @@ -1147,7 +1178,7 @@ public class PerformanceEvaluation extends Configured implements Tool { void onStartup() throws IOException { this.table = connection.getTable(TableName.valueOf(opts.tableName)); } - + @Override void onTakedown() throws IOException { table.close(); @@ -1165,7 +1196,7 @@ public class PerformanceEvaluation extends Configured implements Tool { void onStartup() throws IOException { this.mutator = connection.getBufferedMutator(TableName.valueOf(opts.tableName)); } - + @Override void onTakedown() throws IOException { mutator.close(); @@ -1180,8 +1211,13 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override void testRow(final int i) throws IOException { Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows)); + scan.setCaching(opts.caching); FilterList list = new FilterList(); - scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); + if (opts.addColumns) { + scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); + } else { + scan.addFamily(FAMILY_NAME); + } if (opts.filterAll) { list.addFilter(new FilterAllFilter()); } @@ -1211,10 +1247,15 @@ public class PerformanceEvaluation extends Configured implements Tool { void testRow(final int i) throws IOException { Pair startAndStopRow = getStartAndStopRow(); Scan scan = new Scan(startAndStopRow.getFirst(), startAndStopRow.getSecond()); + scan.setCaching(opts.caching); if (opts.filterAll) { scan.setFilter(new 
FilterAllFilter()); } - scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); + if (opts.addColumns) { + scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); + } else { + scan.addFamily(FAMILY_NAME); + } Result r = null; int count = 0; ResultScanner s = this.table.getScanner(scan); @@ -1310,7 +1351,11 @@ public class PerformanceEvaluation extends Configured implements Tool { Thread.sleep(rd.nextInt(opts.randomSleep)); } Get get = new Get(getRandomRow(this.rand, opts.totalRows)); - get.addColumn(FAMILY_NAME, QUALIFIER_NAME); + if (opts.addColumns) { + get.addColumn(FAMILY_NAME, QUALIFIER_NAME); + } else { + get.addFamily(FAMILY_NAME); + } if (opts.filterAll) { get.setFilter(new FilterAllFilter()); } @@ -1353,21 +1398,24 @@ public class PerformanceEvaluation extends Configured implements Tool { void testRow(final int i) throws IOException { byte[] row = getRandomRow(this.rand, opts.totalRows); Put put = new Put(row); - byte[] value = generateData(this.rand, getValueLength(this.rand)); - if (opts.useTags) { - byte[] tag = generateData(this.rand, TAG_LENGTH); - Tag[] tags = new Tag[opts.noOfTags]; - for (int n = 0; n < opts.noOfTags; n++) { - Tag t = new Tag((byte) n, tag); - tags[n] = t; + for (int column = 0; column < opts.columns; column++) { + byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] value = generateData(this.rand, getValueLength(this.rand)); + if (opts.useTags) { + byte[] tag = generateData(this.rand, TAG_LENGTH); + Tag[] tags = new Tag[opts.noOfTags]; + for (int n = 0; n < opts.noOfTags; n++) { + Tag t = new Tag((byte) n, tag); + tags[n] = t; + } + KeyValue kv = new KeyValue(row, FAMILY_NAME, qualifier, HConstants.LATEST_TIMESTAMP, + value, tags); + put.add(kv); + updateValueSize(kv.getValueLength()); + } else { + put.add(FAMILY_NAME, qualifier, value); + updateValueSize(value.length); } - KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP, - value, tags); - put.add(kv); - updateValueSize(kv.getValueLength()); - } else { - put.add(FAMILY_NAME, QUALIFIER_NAME, value); - updateValueSize(value.length); } put.setDurability(opts.writeToWAL ? 
Durability.SYNC_WAL : Durability.SKIP_WAL); mutator.mutate(put); @@ -1394,8 +1442,12 @@ public class PerformanceEvaluation extends Configured implements Tool { void testRow(final int i) throws IOException { if (this.testScanner == null) { Scan scan = new Scan(format(opts.startRow)); - scan.setCaching(30); - scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); + scan.setCaching(opts.caching); + if (opts.addColumns) { + scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); + } else { + scan.addFamily(FAMILY_NAME); + } if (opts.filterAll) { scan.setFilter(new FilterAllFilter()); } @@ -1415,7 +1467,9 @@ public class PerformanceEvaluation extends Configured implements Tool { @Override void testRow(final int i) throws IOException { Get get = new Get(format(i)); - get.addColumn(FAMILY_NAME, QUALIFIER_NAME); + if (opts.addColumns) { + get.addColumn(FAMILY_NAME, QUALIFIER_NAME); + } if (opts.filterAll) { get.setFilter(new FilterAllFilter()); } @@ -1432,21 +1486,24 @@ public class PerformanceEvaluation extends Configured implements Tool { void testRow(final int i) throws IOException { byte[] row = format(i); Put put = new Put(row); - byte[] value = generateData(this.rand, getValueLength(this.rand)); - if (opts.useTags) { - byte[] tag = generateData(this.rand, TAG_LENGTH); - Tag[] tags = new Tag[opts.noOfTags]; - for (int n = 0; n < opts.noOfTags; n++) { - Tag t = new Tag((byte) n, tag); - tags[n] = t; + for (int column = 0; column < opts.columns; column++) { + byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] value = generateData(this.rand, getValueLength(this.rand)); + if (opts.useTags) { + byte[] tag = generateData(this.rand, TAG_LENGTH); + Tag[] tags = new Tag[opts.noOfTags]; + for (int n = 0; n < opts.noOfTags; n++) { + Tag t = new Tag((byte) n, tag); + tags[n] = t; + } + KeyValue kv = new KeyValue(row, FAMILY_NAME, qualifier, HConstants.LATEST_TIMESTAMP, + value, tags); + put.add(kv); + updateValueSize(kv.getValueLength()); + } else { + put.add(FAMILY_NAME, qualifier, value); + updateValueSize(value.length); } - KeyValue kv = new KeyValue(row, FAMILY_NAME, QUALIFIER_NAME, HConstants.LATEST_TIMESTAMP, - value, tags); - put.add(kv); - updateValueSize(kv.getValueLength()); - } else { - put.add(FAMILY_NAME, QUALIFIER_NAME, value); - updateValueSize(value.length); } put.setDurability(opts.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL); mutator.mutate(put); @@ -1478,7 +1535,7 @@ public class PerformanceEvaluation extends Configured implements Tool { protected Scan constructScan(byte[] valuePrefix) throws IOException { FilterList list = new FilterList(); Filter filter = new SingleColumnValueFilter( - FAMILY_NAME, QUALIFIER_NAME, CompareFilter.CompareOp.EQUAL, + FAMILY_NAME, COLUMN_ZERO, CompareFilter.CompareOp.EQUAL, new BinaryComparator(valuePrefix) ); list.addFilter(filter); @@ -1486,7 +1543,12 @@ public class PerformanceEvaluation extends Configured implements Tool { list.addFilter(new FilterAllFilter()); } Scan scan = new Scan(); - scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); + scan.setCaching(opts.caching); + if (opts.addColumns) { + scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); + } else { + scan.addFamily(FAMILY_NAME); + } scan.setFilter(list); return scan; } @@ -1498,11 +1560,9 @@ public class PerformanceEvaluation extends Configured implements Tool { * @param timeMs Time taken in milliseconds. 
* @return String value with label, ie '123.76 MB/s' */ - private static String calculateMbps(int rows, long timeMs, final int valueSize) { - // MB/s = ((totalRows * ROW_SIZE_BYTES) / totalTimeMS) - // * 1000 MS_PER_SEC / (1024 * 1024) BYTES_PER_MB - BigDecimal rowSize = - BigDecimal.valueOf(ROW_LENGTH + valueSize + FAMILY_NAME.length + QUALIFIER_NAME.length); + private static String calculateMbps(int rows, long timeMs, final int valueSize, int columns) { + BigDecimal rowSize = BigDecimal.valueOf(ROW_LENGTH + + ((valueSize + FAMILY_NAME.length + COLUMN_ZERO.length) * columns)); BigDecimal mbps = BigDecimal.valueOf(rows).multiply(rowSize, CXT) .divide(BigDecimal.valueOf(timeMs), CXT).multiply(MS_PER_SEC, CXT) .divide(BYTES_PER_MB, CXT); @@ -1591,7 +1651,7 @@ public class PerformanceEvaluation extends Configured implements Tool { status.setStatus("Finished " + cmd + " in " + totalElapsedTime + "ms at offset " + opts.startRow + " for " + opts.perClientRunRows + " rows" + " (" + calculateMbps((int)(opts.perClientRunRows * opts.sampleRate), totalElapsedTime, - getAverageValueLength(opts)) + ")"); + getAverageValueLength(opts), opts.columns) + ")"); return new RunResult(totalElapsedTime, t.getLatency()); } @@ -1623,14 +1683,23 @@ public class PerformanceEvaluation extends Configured implements Tool { } protected void printUsage() { - printUsage(null); + printUsage(this.getClass().getName(), null); } - protected void printUsage(final String message) { + protected static void printUsage(final String message) { + printUsage(PerformanceEvaluation.class.getName(), message); + } + + protected static void printUsageAndExit(final String message, final int exitCode) { + printUsage(message); + System.exit(exitCode); + } + + protected static void printUsage(final String className, final String message) { if (message != null && message.length() > 0) { System.err.println(message); } - System.err.println("Usage: java " + this.getClass().getName() + " \\"); + System.err.println("Usage: java " + className + " \\"); System.err.println(" [-D]* "); System.err.println(); System.err.println("Options:"); @@ -1677,10 +1746,13 @@ public class PerformanceEvaluation extends Configured implements Tool { "Default: opts.perClientRunRows / 10"); System.err.println(" multiGet Batch gets together into groups of N. Only supported " + "by randomRead. Default: disabled"); + System.err.println(" addColumns Adds columns to scans/gets explicitly. Default: true"); System.err.println(" replicas Enable region replica testing. Defaults: 1."); System.err.println(" cycles How many times to cycle the test. Defaults: 1."); System.err.println(" splitPolicy Specify a custom RegionSplitPolicy for the table."); System.err.println(" randomSleep Do a random sleep before each get between 0 and entered value. Defaults: 0"); + System.err.println(" columns Columns to write per row. Default: 1"); + System.err.println(" caching Scan caching to use. Default: 30"); System.err.println(); System.err.println(" Note: -D properties will be applied to the conf used. 
"); System.err.println(" For example: "); @@ -1698,8 +1770,7 @@ public class PerformanceEvaluation extends Configured implements Tool { System.err.println(" running: 1 <= value <= 500"); System.err.println("Examples:"); System.err.println(" To run a single evaluation client:"); - System.err.println(" $ bin/hbase " + this.getClass().getName() - + " sequentialWrite 1"); + System.err.println(" $ bin/hbase " + className + " sequentialWrite 1"); } /** @@ -1906,6 +1977,24 @@ public class PerformanceEvaluation extends Configured implements Tool { continue; } + final String addColumns = "--addColumns="; + if (cmd.startsWith(addColumns)) { + opts.addColumns = Boolean.parseBoolean(cmd.substring(addColumns.length())); + continue; + } + + final String columns = "--columns="; + if (cmd.startsWith(columns)) { + opts.columns = Integer.parseInt(cmd.substring(columns.length())); + continue; + } + + final String caching = "--caching="; + if (cmd.startsWith(caching)) { + opts.caching = Integer.parseInt(cmd.substring(caching.length())); + continue; + } + if (isCommandClass(cmd)) { opts.cmdName = cmd; opts.numClientThreads = Integer.parseInt(args.remove()); @@ -1916,6 +2005,8 @@ public class PerformanceEvaluation extends Configured implements Tool { } opts = calculateRowsAndSize(opts); break; + } else { + printUsageAndExit("ERROR: Unrecognized option/command: " + cmd, -1); } // Not matching any option or command. @@ -1941,7 +2032,7 @@ public class PerformanceEvaluation extends Configured implements Tool { } static int getRowsPerGB(final TestOptions opts) { - return ONE_GB / (opts.valueRandom? opts.valueSize/2: opts.valueSize); + return ONE_GB / ((opts.valueRandom? opts.valueSize/2: opts.valueSize) * opts.getColumns()); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java index 0c331b76147..24e9590d9c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java @@ -29,7 +29,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; @@ -39,9 +38,7 @@ import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; import org.apache.hadoop.hbase.mapreduce.TableMapper; -import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.util.AbstractHBaseTool; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.mapreduce.Counters; @@ -137,7 +134,7 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool { Scan scan = new Scan(); // default scan settings scan.setCacheBlocks(false); scan.setMaxVersions(1); - scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE)); + scan.setScanMetricsEnabled(true); if (caching != null) { scan.setCaching(Integer.parseInt(caching)); } @@ -177,7 +174,7 @@ public class ScanPerformanceEvaluation extends AbstractHBaseTool { 
table.close(); connection.close(); - ScanMetrics metrics = ProtobufUtil.toScanMetrics(scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA)); + ScanMetrics metrics = scan.getScanMetrics(); long totalBytes = metrics.countOfBytesInResults.get(); double throughput = (double)totalBytes / scanTimer.elapsedTime(TimeUnit.SECONDS); double throughputRows = (double)numRows / scanTimer.elapsedTime(TimeUnit.SECONDS); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ServerResourceCheckerJUnitListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ServerResourceCheckerJUnitListener.java index 4e01b5e9f97..4b750e430ca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ServerResourceCheckerJUnitListener.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ServerResourceCheckerJUnitListener.java @@ -19,24 +19,9 @@ package org.apache.hadoop.hbase; -import org.apache.hadoop.hbase.ResourceChecker.Phase; -import org.apache.hadoop.hbase.client.HConnectionTestingUtility; - /** * Monitor the resources. use by the tests All resources in {@link ResourceCheckerJUnitListener} * plus the number of connection. */ public class ServerResourceCheckerJUnitListener extends ResourceCheckerJUnitListener { - - static class ConnectionCountResourceAnalyzer extends ResourceChecker.ResourceAnalyzer { - @Override - public int getVal(Phase phase) { - return HConnectionTestingUtility.getConnectionCount(); - } - } - - @Override - protected void addResourceAnalyzer(ResourceChecker rc) { - rc.addResourceAnalyzer(new ConnectionCountResourceAnalyzer()); - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java index ebaa92f6905..dbb6156809c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -45,7 +44,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java index d8178f072d3..2d081647ff1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java @@ -31,8 +31,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import 
org.apache.hadoop.hbase.util.Bytes; @@ -90,7 +90,7 @@ public class TestGlobalMemStoreSize { ProtobufUtil.getOnlineRegions(server.getRSRpcServices())) { globalMemStoreSize += server.getFromOnlineRegions(regionInfo.getEncodedName()). - getMemstoreSize().get(); + getMemstoreSize(); } assertEquals(server.getRegionServerAccounting().getGlobalMemstoreSize(), globalMemStoreSize); @@ -104,7 +104,7 @@ public class TestGlobalMemStoreSize { for (HRegionInfo regionInfo : ProtobufUtil.getOnlineRegions(server.getRSRpcServices())) { - HRegion r = server.getFromOnlineRegions(regionInfo.getEncodedName()); + Region r = server.getFromOnlineRegions(regionInfo.getEncodedName()); flush(r, server); } LOG.info("Post flush on " + server.getServerName()); @@ -120,14 +120,14 @@ public class TestGlobalMemStoreSize { // our test was running.... for (HRegionInfo regionInfo : ProtobufUtil.getOnlineRegions(server.getRSRpcServices())) { - HRegion r = server.getFromOnlineRegions(regionInfo.getEncodedName()); - long l = r.getMemstoreSize().longValue(); + Region r = server.getFromOnlineRegions(regionInfo.getEncodedName()); + long l = r.getMemstoreSize(); if (l > 0) { // Only meta could have edits at this stage. Give it another flush // clear them. assertTrue(regionInfo.isMetaRegion()); LOG.info(r.toString() + " " + l + ", reflushing"); - r.flushcache(); + r.flush(true); } } } @@ -145,10 +145,10 @@ public class TestGlobalMemStoreSize { * @param server * @throws IOException */ - private void flush(final HRegion r, final HRegionServer server) + private void flush(final Region r, final HRegionServer server) throws IOException { LOG.info("Flush " + r.toString() + " on " + server.getServerName() + - ", " + r.flushcache() + ", size=" + + ", " + r.flush(true) + ", size=" + server.getRegionServerAccounting().getGlobalMemstoreSize()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java index f3e3dc2b148..861ddee8c5c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java @@ -40,7 +40,9 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil; import org.junit.Test; import org.junit.experimental.categories.Category; + import java.io.File; +import java.util.List; /** * Test our testing utility class @@ -186,8 +188,8 @@ public class TestHBaseTestingUtility { htu1.shutdownMiniCluster(); } - - @Test public void testMiniZooKeeper() throws Exception { + @Test + public void testMiniZooKeeperWithOneServer() throws Exception { HBaseTestingUtility hbt = new HBaseTestingUtility(); MiniZooKeeperCluster cluster1 = hbt.startMiniZKCluster(); try { @@ -196,7 +198,11 @@ public class TestHBaseTestingUtility { } finally { hbt.shutdownMiniZKCluster(); } + } + @Test + public void testMiniZooKeeperWithMultipleServers() throws Exception { + HBaseTestingUtility hbt = new HBaseTestingUtility(); // set up zookeeper cluster with 5 zk servers MiniZooKeeperCluster cluster2 = hbt.startMiniZKCluster(5); int defaultClientPort = 21818; @@ -236,6 +242,111 @@ public class TestHBaseTestingUtility { } } + @Test + public void testMiniZooKeeperWithMultipleClientPorts() throws Exception { + int defaultClientPort = 8888; + int i, j; + HBaseTestingUtility hbt = new HBaseTestingUtility(); + + // Test 1 - set up zookeeper cluster with same number of ZK servers and specified client ports 
+    int [] clientPortList1 = {1111, 1112, 1113};
+    MiniZooKeeperCluster cluster1 = hbt.startMiniZKCluster(clientPortList1.length, clientPortList1);
+    try {
+      List<Integer> clientPortListInCluster = cluster1.getClientPortList();
+
+      for (i = 0; i < clientPortListInCluster.size(); i++) {
+        assertEquals(clientPortListInCluster.get(i).intValue(), clientPortList1[i]);
+      }
+    } finally {
+      hbt.shutdownMiniZKCluster();
+    }
+
+    // Test 2 - set up zookeeper cluster with more ZK servers than specified client ports
+    hbt.getConfiguration().setInt("test.hbase.zookeeper.property.clientPort", defaultClientPort);
+    int [] clientPortList2 = {2222, 2223};
+    MiniZooKeeperCluster cluster2 =
+        hbt.startMiniZKCluster(clientPortList2.length + 2, clientPortList2);
+
+    try {
+      List<Integer> clientPortListInCluster = cluster2.getClientPortList();
+
+      for (i = 0, j = 0; i < clientPortListInCluster.size(); i++) {
+        if (i < clientPortList2.length) {
+          assertEquals(clientPortListInCluster.get(i).intValue(), clientPortList2[i]);
+        } else {
+          // servers with no specified client port will use defaultClientPort or some other ports
+          // based on defaultClientPort
+          assertEquals(clientPortListInCluster.get(i).intValue(), defaultClientPort + j);
+          j++;
+        }
+      }
+    } finally {
+      hbt.shutdownMiniZKCluster();
+    }
+
+    // Test 3 - set up zookeeper cluster with invalid client ports
+    hbt.getConfiguration().setInt("test.hbase.zookeeper.property.clientPort", defaultClientPort);
+    int [] clientPortList3 = {3333, -3334, 3335, 0};
+    MiniZooKeeperCluster cluster3 =
+        hbt.startMiniZKCluster(clientPortList3.length + 1, clientPortList3);
+
+    try {
+      List<Integer> clientPortListInCluster = cluster3.getClientPortList();
+
+      for (i = 0, j = 0; i < clientPortListInCluster.size(); i++) {
+        // Servers will only use valid client ports; if ports are not specified or invalid,
+        // the default port or a port based on default port will be used.
+        if (i < clientPortList3.length && clientPortList3[i] > 0) {
+          assertEquals(clientPortListInCluster.get(i).intValue(), clientPortList3[i]);
+        } else {
+          assertEquals(clientPortListInCluster.get(i).intValue(), defaultClientPort + j);
+          j++;
+        }
+      }
+    } finally {
+      hbt.shutdownMiniZKCluster();
+    }
+
+    // Test 4 - set up zookeeper cluster with default port and some other ports used
+    // This test verifies that the defaultClientPort and defaultClientPort+2 are used, so
+    // the algorithm should choose defaultClientPort+1 and defaultClientPort+3 to fill
+    // out the ports for servers without ports specified.
+    hbt.getConfiguration().setInt("test.hbase.zookeeper.property.clientPort", defaultClientPort);
+    int [] clientPortList4 = {-4444, defaultClientPort+2, 4446, defaultClientPort};
+    MiniZooKeeperCluster cluster4 =
+        hbt.startMiniZKCluster(clientPortList4.length + 1, clientPortList4);
+
+    try {
+      List<Integer> clientPortListInCluster = cluster4.getClientPortList();
+
+      for (i = 0, j = 1; i < clientPortListInCluster.size(); i++) {
+        // Servers will only use valid client ports; if ports are not specified or invalid,
+        // the default port or a port based on default port will be used.
+        if (i < clientPortList4.length && clientPortList4[i] > 0) {
+          assertEquals(clientPortListInCluster.get(i).intValue(), clientPortList4[i]);
+        } else {
+          assertEquals(clientPortListInCluster.get(i).intValue(), defaultClientPort + j);
+          j += 2;
+        }
+      }
+    } finally {
+      hbt.shutdownMiniZKCluster();
+    }
+
+    // Test 5 - set up zookeeper cluster with same ports specified - failure is expected.
+ int [] clientPortList5 = {5555, 5556, 5556}; + + try { + MiniZooKeeperCluster cluster5 = + hbt.startMiniZKCluster(clientPortList5.length, clientPortList5); + assertTrue(cluster5.getClientPort() == -1); // expected failure + } catch (Exception e) { + // exception is acceptable + } finally { + hbt.shutdownMiniZKCluster(); + } + } + @Test public void testMiniDFSCluster() throws Exception { HBaseTestingUtility hbt = new HBaseTestingUtility(); MiniDFSCluster cluster = hbt.startMiniDFSCluster(null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java index f44eb7b9b3c..48a5dbd3d03 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.HStore; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; @@ -290,13 +291,14 @@ public class TestIOFencing { long startWaitTime = System.currentTimeMillis(); while (compactingRegion.getEarliestFlushTimeForAllStores() <= lastFlushTime || compactingRegion.countStoreFiles() <= 1) { - LOG.info("Waiting for the region to flush " + compactingRegion.getRegionNameAsString()); + LOG.info("Waiting for the region to flush " + + compactingRegion.getRegionInfo().getRegionNameAsString()); Thread.sleep(1000); assertTrue("Timed out waiting for the region to flush", System.currentTimeMillis() - startWaitTime < 30000); } assertTrue(compactingRegion.countStoreFiles() > 1); - final byte REGION_NAME[] = compactingRegion.getRegionName(); + final byte REGION_NAME[] = compactingRegion.getRegionInfo().getRegionName(); LOG.info("Asking for compaction"); ((HBaseAdmin)admin).majorCompact(TABLE_NAME.getName()); LOG.info("Waiting for compaction to be about to start"); @@ -314,7 +316,7 @@ public class TestIOFencing { Waiter.waitFor(c, 60000, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - HRegion newRegion = newServer.getOnlineRegion(REGION_NAME); + Region newRegion = newServer.getOnlineRegion(REGION_NAME); return newRegion != null && !newRegion.isRecovering(); } }); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java index a72b151349a..8ac35565141 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestInfoServers.java @@ -18,15 +18,16 @@ */ package org.apache.hadoop.hbase; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import java.io.BufferedInputStream; import java.io.IOException; +import java.io.InputStream; import java.net.URL; +import org.apache.commons.io.IOUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Bytes; @@ -54,6 +55,9 @@ public class 
TestInfoServers { //We need to make sure that the server can be started as read only. UTIL.getConfiguration().setBoolean("hbase.master.ui.readonly", true); UTIL.startMiniCluster(); + if (!UTIL.getHBaseCluster().waitForActiveAndReadyMaster(30000)) { + throw new RuntimeException("Active master not ready"); + } } @AfterClass @@ -69,12 +73,10 @@ public class TestInfoServers { // give the cluster time to start up UTIL.getConnection().getTable(TableName.META_TABLE_NAME).close(); int port = UTIL.getHBaseCluster().getMaster().getInfoServer().getPort(); - assertContainsContent(new URL("http://localhost:" + port + - "/index.html"), "master-status"); - port = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer(). - getInfoServer().getPort(); - assertContainsContent(new URL("http://localhost:" + port + - "/index.html"), "rs-status"); + assertContainsContent(new URL("http://localhost:" + port + "/index.html"), "master-status"); + port = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer() + .getInfoServer().getPort(); + assertContainsContent(new URL("http://localhost:" + port + "/index.html"), "rs-status"); } /** @@ -86,15 +88,11 @@ public class TestInfoServers { */ @Test public void testInfoServersStatusPages() throws Exception { - // give the cluster time to start up - UTIL.getConnection().getTable(TableName.META_TABLE_NAME).close(); int port = UTIL.getHBaseCluster().getMaster().getInfoServer().getPort(); - assertContainsContent(new URL("http://localhost:" + port + - "/master-status"), "meta"); - port = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer(). - getInfoServer().getPort(); - assertContainsContent(new URL("http://localhost:" + port + - "/rs-status"), "meta"); + assertContainsContent(new URL("http://localhost:" + port + "/master-status"), "meta"); + port = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer() + .getInfoServer().getPort(); + assertContainsContent(new URL("http://localhost:" + port + "/rs-status"), "meta"); } @Test @@ -102,44 +100,34 @@ public class TestInfoServers { TableName tableName = TableName.valueOf("testMasterServerReadOnly"); byte[] cf = Bytes.toBytes("d"); UTIL.createTable(tableName, cf); - UTIL.getConnection().getTable(tableName).close(); + UTIL.waitTableAvailable(tableName); int port = UTIL.getHBaseCluster().getMaster().getInfoServer().getPort(); + assertDoesNotContainContent(new URL("http://localhost:" + port + "/table.jsp?name=" + tableName + + "&action=split&key="), "Table action request accepted"); assertDoesNotContainContent( - new URL("http://localhost:" + port + "/table.jsp?name=" + tableName + "&action=split&key="), - "Table action request accepted"); - assertDoesNotContainContent( - new URL("http://localhost:" + port + "/table.jsp?name=" + tableName), - "Actions:"); + new URL("http://localhost:" + port + "/table.jsp?name=" + tableName), "Actions:"); } - private void assertContainsContent(final URL u, final String expected) - throws IOException { + private void assertContainsContent(final URL u, final String expected) throws IOException { LOG.info("Testing " + u.toString() + " has " + expected); String content = getUrlContent(u); - assertTrue("expected=" + expected + ", content=" + content, + assertTrue("expected=" + expected + ", content=" + content, content.contains(expected)); + } + + private void assertDoesNotContainContent(final URL u, final String expected) throws IOException { + LOG.info("Testing " + u.toString() + " does not have " + expected); + String content = 
getUrlContent(u); + assertFalse("Does Not Contain =" + expected + ", content=" + content, content.contains(expected)); } - - - private void assertDoesNotContainContent(final URL u, final String expected) - throws IOException { - LOG.info("Testing " + u.toString() + " has " + expected); - String content = getUrlContent(u); - assertTrue("Does Not Contain =" + expected + ", content=" + content, - !content.contains(expected)); - } - private String getUrlContent(URL u) throws IOException { java.net.URLConnection c = u.openConnection(); + c.setConnectTimeout(2000); + c.setReadTimeout(2000); c.connect(); - StringBuilder sb = new StringBuilder(); - BufferedInputStream bis = new BufferedInputStream(c.getInputStream()); - byte [] bytes = new byte[1024]; - for (int read = -1; (read = bis.read(bytes)) != -1;) { - sb.append(new String(bytes, 0, read)); + try (InputStream in = c.getInputStream()) { + return IOUtils.toString(in); } - bis.close(); - return sb.toString(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java index eefb974deb7..3275d156de3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java @@ -23,6 +23,12 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import java.io.IOException; import java.util.List; @@ -395,7 +401,7 @@ public class TestMetaTableAccessor { Get get = new Get(row); Result result = meta.get(get); Cell serverCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, - MetaTableAccessor.getServerColumn(replicaId)); + MetaTableAccessor.getServerColumn(replicaId)); Cell startCodeCell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(replicaId)); assertNotNull(serverCell); @@ -475,5 +481,49 @@ public class TestMetaTableAccessor { meta.close(); } } + + @Test + public void testMetaScanner() throws Exception { + LOG.info("Starting testMetaScanner"); + + final TableName TABLENAME = TableName.valueOf("testMetaScanner"); + final byte[] FAMILY = Bytes.toBytes("family"); + final byte[][] SPLIT_KEYS = + new byte[][] { Bytes.toBytes("region_a"), Bytes.toBytes("region_b") }; + + UTIL.createTable(TABLENAME, FAMILY, SPLIT_KEYS); + HTable table = (HTable) connection.getTable(TABLENAME); + // Make sure all the regions are deployed + UTIL.countRows(table); + + MetaTableAccessor.Visitor visitor = + mock(MetaTableAccessor.Visitor.class); + doReturn(true).when(visitor).visit((Result) anyObject()); + + // Scanning the entire table should give us three rows + MetaTableAccessor.scanMetaForTableRegions(connection, visitor, TABLENAME); + verify(visitor, times(3)).visit((Result) anyObject()); + + // Scanning the table with a specified empty start row should also + // give us three hbase:meta rows + reset(visitor); + doReturn(true).when(visitor).visit((Result) anyObject()); + MetaTableAccessor.scanMeta(connection, visitor, TABLENAME, null, 1000); + verify(visitor, times(3)).visit((Result) anyObject()); + + // Scanning the table starting 
in the middle should give us two rows: + // region_a and region_b + reset(visitor); + doReturn(true).when(visitor).visit((Result) anyObject()); + MetaTableAccessor.scanMeta(connection, visitor, TABLENAME, Bytes.toBytes("region_ac"), 1000); + verify(visitor, times(2)).visit((Result) anyObject()); + + // Scanning with a limit of 1 should only give us one row + reset(visitor); + doReturn(true).when(visitor).visit((Result) anyObject()); + MetaTableAccessor.scanMeta(connection, visitor, TABLENAME, Bytes.toBytes("region_ac"), 1); + verify(visitor, times(1)).visit((Result) anyObject()); + table.close(); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java index f70a0d77e6b..eefadd801ad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessorNoCluster.java @@ -183,10 +183,8 @@ public class TestMetaTableAccessorNoCluster { // Return the RegionLocations object when locateRegion // The ugly format below comes of 'Important gotcha on spying real objects!' from // http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html - ClusterConnection cConnection = - HConnectionTestingUtility.getSpiedClusterConnection(UTIL.getConfiguration()); Mockito.doReturn(rl).when - (cConnection).locateRegion((TableName)Mockito.any(), (byte[])Mockito.any(), + (connection).locateRegion((TableName)Mockito.any(), (byte[])Mockito.any(), Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt()); // Now shove our HRI implementation into the spied-upon connection. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java new file mode 100644 index 00000000000..421d91ca094 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMovedRegionsCleaner.java @@ -0,0 +1,96 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase; + + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.io.IOException; + + +/** + * Test whether background cleanup of MovedRegion entries is happening + */ +@Category({ MiscTests.class, MediumTests.class }) public class TestMovedRegionsCleaner { + + public static final Log LOG = LogFactory.getLog(TestRegionRebalancing.class); + private final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + public static int numCalls = 0; + + private static class TestMockRegionServer extends MiniHBaseCluster.MiniHBaseClusterRegionServer { + + public TestMockRegionServer(Configuration conf, CoordinatedStateManager cp) + throws IOException, InterruptedException { + super(conf, cp); + } + + protected int movedRegionCleanerPeriod() { + return 500; + } + + @Override protected void cleanMovedRegions() { + // count the number of calls that are being made to this + // + numCalls++; + super.cleanMovedRegions(); + } + } + + @After public void after() throws Exception { + UTIL.shutdownMiniCluster(); + } + + @Before public void before() throws Exception { + UTIL.getConfiguration() + .setStrings(HConstants.REGION_SERVER_IMPL, TestMockRegionServer.class.getName()); + UTIL.startMiniCluster(1); + } + + /** + * Start the cluster, wait for some time and verify that the background + * MovedRegion cleaner indeed gets called + * + * @throws IOException + * @throws InterruptedException + */ + @Test public void testMovedRegionsCleaner() throws IOException, InterruptedException { + // We need to sleep long enough to trigger at least one round of background calls + // to MovedRegionCleaner happen. Currently the period is set to 500ms. + // Setting the sleep here for 2s just to be safe + // + UTIL.waitFor(2000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws IOException { + + // verify that there was at least one call to the cleanMovedRegions function + // + return numCalls > 0; + } + }); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java new file mode 100644 index 00000000000..eef955ecf0d --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java @@ -0,0 +1,831 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.client.ClientScanner; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; +import org.apache.hadoop.hbase.filter.ColumnRangeFilter; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; +import org.apache.hadoop.hbase.filter.FirstKeyValueMatchingQualifiersFilter; +import org.apache.hadoop.hbase.filter.RandomRowFilter; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * These tests are focused on testing how partial results appear to a client. Partial results are + * {@link Result}s that contain only a portion of a row's complete list of cells. Partial results + * are formed when the server breaches its maximum result size when trying to service a client's RPC + * request. It is the responsibility of the scanner on the client side to recognize when partial + * results have been returned and to take action to form the complete results. + *

      + * Unless the flag {@link Scan#setAllowPartialResults(boolean)} has been set to true, the caller of + * {@link ResultScanner#next()} should never see partial results. + */ +@Category(MediumTests.class) +public class TestPartialResultsFromClientSide { + private static final Log LOG = LogFactory.getLog(TestPartialResultsFromClientSide.class); + + private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + private static Table TABLE = null; + + /** + * Table configuration + */ + private static TableName TABLE_NAME = TableName.valueOf("testTable"); + + private static int NUM_ROWS = 5; + private static byte[] ROW = Bytes.toBytes("testRow"); + private static byte[][] ROWS = HTestConst.makeNAscii(ROW, NUM_ROWS); + + // Should keep this value below 10 to keep generation of expected kv's simple. If above 10 then + // table/row/cf1/... will be followed by table/row/cf10/... instead of table/row/cf2/... which + // breaks the simple generation of expected kv's + private static int NUM_FAMILIES = 10; + private static byte[] FAMILY = Bytes.toBytes("testFamily"); + private static byte[][] FAMILIES = HTestConst.makeNAscii(FAMILY, NUM_FAMILIES); + + private static int NUM_QUALIFIERS = 10; + private static byte[] QUALIFIER = Bytes.toBytes("testQualifier"); + private static byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, NUM_QUALIFIERS); + + private static int VALUE_SIZE = 1024; + private static byte[] VALUE = Bytes.createMaxByteArray(VALUE_SIZE); + + private static int NUM_COLS = NUM_FAMILIES * NUM_QUALIFIERS; + + // Approximation of how large the heap size of cells in our table. Should be accessed through + // getCellHeapSize(). + private static long CELL_HEAP_SIZE = -1; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.startMiniCluster(3); + TABLE = createTestTable(TABLE_NAME, ROWS, FAMILIES, QUALIFIERS, VALUE); + } + + static Table createTestTable(TableName name, byte[][] rows, byte[][] families, + byte[][] qualifiers, byte[] cellValue) throws IOException { + Table ht = TEST_UTIL.createTable(name, families); + List puts = createPuts(rows, families, qualifiers, cellValue); + ht.put(puts); + + return ht; + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + /** + * Ensure that the expected key values appear in a result returned from a scanner that is + * combining partial results into complete results + * @throws Exception + */ + @Test + public void testExpectedValuesOfPartialResults() throws Exception { + testExpectedValuesOfPartialResults(false); + testExpectedValuesOfPartialResults(true); + } + + public void testExpectedValuesOfPartialResults(boolean reversed) throws Exception { + Scan partialScan = new Scan(); + partialScan.setMaxVersions(); + // Max result size of 1 ensures that each RPC request will return a single cell. The scanner + // will need to reconstruct the results into a complete result before returning to the caller + partialScan.setMaxResultSize(1); + partialScan.setReversed(reversed); + ResultScanner partialScanner = TABLE.getScanner(partialScan); + + final int startRow = reversed ? ROWS.length - 1 : 0; + final int endRow = reversed ? -1 : ROWS.length; + final int loopDelta = reversed ? 
-1 : 1; + String message; + + for (int row = startRow; row != endRow; row = row + loopDelta) { + message = "Ensuring the expected keyValues are present for row " + row; + List expectedKeyValues = createKeyValuesForRow(ROWS[row], FAMILIES, QUALIFIERS, VALUE); + Result result = partialScanner.next(); + assertFalse(result.isPartial()); + verifyResult(result, expectedKeyValues, message); + } + + partialScanner.close(); + } + + /** + * Ensure that we only see Results marked as partial when the allowPartial flag is set + * @throws Exception + */ + @Test + public void testAllowPartialResults() throws Exception { + Scan scan = new Scan(); + scan.setAllowPartialResults(true); + scan.setMaxResultSize(1); + ResultScanner scanner = TABLE.getScanner(scan); + Result result = scanner.next(); + + assertTrue(result != null); + assertTrue(result.isPartial()); + assertTrue(result.rawCells() != null); + assertTrue(result.rawCells().length == 1); + + scanner.close(); + + scan.setAllowPartialResults(false); + scanner = TABLE.getScanner(scan); + result = scanner.next(); + + assertTrue(result != null); + assertTrue(!result.isPartial()); + assertTrue(result.rawCells() != null); + assertTrue(result.rawCells().length == NUM_COLS); + + scanner.close(); + } + + /** + * Ensure that the results returned from a scanner that retrieves all results in a single RPC call + * matches the results that are returned from a scanner that must incrementally combine partial + * results into complete results. A variety of scan configurations can be tested + * @throws Exception + */ + @Test + public void testEquivalenceOfScanResults() throws Exception { + Scan oneShotScan = new Scan(); + oneShotScan.setMaxResultSize(Long.MAX_VALUE); + + Scan partialScan = new Scan(oneShotScan); + partialScan.setMaxResultSize(1); + + testEquivalenceOfScanResults(TABLE, oneShotScan, partialScan); + } + + public void testEquivalenceOfScanResults(Table table, Scan scan1, Scan scan2) throws Exception { + ResultScanner scanner1 = table.getScanner(scan1); + ResultScanner scanner2 = table.getScanner(scan2); + + Result r1 = null; + Result r2 = null; + int count = 0; + + while ((r1 = scanner1.next()) != null) { + r2 = scanner2.next(); + + assertTrue(r2 != null); + compareResults(r1, r2, "Comparing result #" + count); + count++; + } + + assertTrue(scanner2.next() == null); + + scanner1.close(); + scanner2.close(); + } + + /** + * Order of cells in partial results matches the ordering of cells from complete results + * @throws Exception + */ + @Test + public void testOrderingOfCellsInPartialResults() throws Exception { + Scan scan = new Scan(); + + for (int col = 1; col <= NUM_COLS; col++) { + scan.setMaxResultSize(getResultSizeForNumberOfCells(col)); + testOrderingOfCellsInPartialResults(scan); + + // Test again with a reversed scanner + scan.setReversed(true); + testOrderingOfCellsInPartialResults(scan); + } + } + + public void testOrderingOfCellsInPartialResults(final Scan basePartialScan) throws Exception { + // Scan that retrieves results in pieces (partials). 
By setting allowPartialResults to be true + // the results will NOT be reconstructed and instead the caller will see the partial results + // returned by the server + Scan partialScan = new Scan(basePartialScan); + partialScan.setAllowPartialResults(true); + ResultScanner partialScanner = TABLE.getScanner(partialScan); + + // Scan that retrieves all table results in single RPC request + Scan oneShotScan = new Scan(basePartialScan); + oneShotScan.setMaxResultSize(Long.MAX_VALUE); + oneShotScan.setCaching(ROWS.length); + ResultScanner oneShotScanner = TABLE.getScanner(oneShotScan); + + Result oneShotResult = oneShotScanner.next(); + Result partialResult = null; + int iterationCount = 0; + + while (oneShotResult != null && oneShotResult.rawCells() != null) { + List aggregatePartialCells = new ArrayList(); + do { + partialResult = partialScanner.next(); + assertTrue("Partial Result is null. iteration: " + iterationCount, partialResult != null); + assertTrue("Partial cells are null. iteration: " + iterationCount, + partialResult.rawCells() != null); + + for (Cell c : partialResult.rawCells()) { + aggregatePartialCells.add(c); + } + } while (partialResult.isPartial()); + + assertTrue("Number of cells differs. iteration: " + iterationCount, + oneShotResult.rawCells().length == aggregatePartialCells.size()); + final Cell[] oneShotCells = oneShotResult.rawCells(); + for (int cell = 0; cell < oneShotCells.length; cell++) { + Cell oneShotCell = oneShotCells[cell]; + Cell partialCell = aggregatePartialCells.get(cell); + + assertTrue("One shot cell was null", oneShotCell != null); + assertTrue("Partial cell was null", partialCell != null); + assertTrue("Cell differs. oneShotCell:" + oneShotCell + " partialCell:" + partialCell, + oneShotCell.equals(partialCell)); + } + + oneShotResult = oneShotScanner.next(); + iterationCount++; + } + + assertTrue(partialScanner.next() == null); + + partialScanner.close(); + oneShotScanner.close(); + } + + /** + * Setting the max result size allows us to control how many cells we expect to see on each call + * to next on the scanner. Test a variety of different sizes for correctness + * @throws Exception + */ + @Test + public void testExpectedNumberOfCellsPerPartialResult() throws Exception { + Scan scan = new Scan(); + testExpectedNumberOfCellsPerPartialResult(scan); + + scan.setReversed(true); + testExpectedNumberOfCellsPerPartialResult(scan); + } + + public void testExpectedNumberOfCellsPerPartialResult(Scan baseScan) throws Exception { + for (int expectedCells = 1; expectedCells <= NUM_COLS; expectedCells++) { + testExpectedNumberOfCellsPerPartialResult(baseScan, expectedCells); + } + } + + public void testExpectedNumberOfCellsPerPartialResult(Scan baseScan, int expectedNumberOfCells) + throws Exception { + + if (LOG.isInfoEnabled()) LOG.info("groupSize:" + expectedNumberOfCells); + + // Use the cellHeapSize to set maxResultSize such that we know how many cells to expect back + // from the call. The returned results should NOT exceed expectedNumberOfCells but may be less + // than it in cases where expectedNumberOfCells is not an exact multiple of the number of + // columns in the table. 
+ Scan scan = new Scan(baseScan); + scan.setAllowPartialResults(true); + scan.setMaxResultSize(getResultSizeForNumberOfCells(expectedNumberOfCells)); + + ResultScanner scanner = TABLE.getScanner(scan); + Result result = null; + byte[] prevRow = null; + while ((result = scanner.next()) != null) { + assertTrue(result.rawCells() != null); + + // Cases when cell count won't equal expectedNumberOfCells: + // 1. Returned result is the final result needed to form the complete result for that row + // 2. It is the first result we have seen for that row and thus may have been fetched as + // the last group of cells that fit inside the maxResultSize + assertTrue( + "Result's cell count differed from expected number. result: " + result, + result.rawCells().length == expectedNumberOfCells || !result.isPartial() + || !Bytes.equals(prevRow, result.getRow())); + prevRow = result.getRow(); + } + + scanner.close(); + } + + /** + * @return The approximate heap size of a cell in the test table. All cells should have + * approximately the same heap size, so the value is cached to avoid repeating the + * calculation + * @throws Exception + */ + private long getCellHeapSize() throws Exception { + if (CELL_HEAP_SIZE == -1) { + // Do a partial scan that will return a single result with a single cell + Scan scan = new Scan(); + scan.setMaxResultSize(1); + scan.setAllowPartialResults(true); + ResultScanner scanner = TABLE.getScanner(scan); + + Result result = scanner.next(); + + assertTrue(result != null); + assertTrue(result.rawCells() != null); + assertTrue(result.rawCells().length == 1); + + CELL_HEAP_SIZE = CellUtil.estimatedHeapSizeOf(result.rawCells()[0]); + if (LOG.isInfoEnabled()) LOG.info("Cell heap size: " + CELL_HEAP_SIZE); + scanner.close(); + } + + return CELL_HEAP_SIZE; + } + + /** + * @param numberOfCells + * @return the result size that should be used in {@link Scan#setMaxResultSize(long)} if you want + * the server to return exactly numberOfCells cells + * @throws Exception + */ + private long getResultSizeForNumberOfCells(int numberOfCells) throws Exception { + return getCellHeapSize() * numberOfCells; + } + + /** + * Test various combinations of batching and partial results for correctness + */ + @Test + public void testPartialResultsAndBatch() throws Exception { + for (int batch = 1; batch <= NUM_COLS / 4; batch++) { + for (int cellsPerPartial = 1; cellsPerPartial <= NUM_COLS / 4; cellsPerPartial++) { + testPartialResultsAndBatch(batch, cellsPerPartial); + } + } + } + + public void testPartialResultsAndBatch(final int batch, final int cellsPerPartialResult) + throws Exception { + if (LOG.isInfoEnabled()) { + LOG.info("batch: " + batch + " cellsPerPartialResult: " + cellsPerPartialResult); + } + + Scan scan = new Scan(); + scan.setMaxResultSize(getResultSizeForNumberOfCells(cellsPerPartialResult)); + scan.setBatch(batch); + ResultScanner scanner = TABLE.getScanner(scan); + Result result = scanner.next(); + int repCount = 0; + + while ((result = scanner.next()) != null) { + assertTrue(result.rawCells() != null); + + if (result.isPartial()) { + final String error = + "Cells:" + result.rawCells().length + " Batch size:" + batch + + " cellsPerPartialResult:" + cellsPerPartialResult + " rep:" + repCount; + assertTrue(error, result.rawCells().length <= Math.min(batch, cellsPerPartialResult)); + } else { + assertTrue(result.rawCells().length <= batch); + } + repCount++; + } + + scanner.close(); + } + + /** + * Test the method {@link Result#createCompleteResult(List, Result)} + * @throws Exception + */ + 
@Test + public void testPartialResultsReassembly() throws Exception { + Scan scan = new Scan(); + testPartialResultsReassembly(scan); + scan.setReversed(true); + testPartialResultsReassembly(scan); + } + + public void testPartialResultsReassembly(Scan scanBase) throws Exception { + Scan partialScan = new Scan(scanBase); + partialScan.setMaxResultSize(1); + partialScan.setAllowPartialResults(true); + ResultScanner partialScanner = TABLE.getScanner(partialScan); + + Scan oneShotScan = new Scan(scanBase); + oneShotScan.setMaxResultSize(Long.MAX_VALUE); + ResultScanner oneShotScanner = TABLE.getScanner(oneShotScan); + + ArrayList partials = new ArrayList<>(); + for (int i = 0; i < NUM_ROWS; i++) { + Result partialResult = null; + Result completeResult = null; + Result oneShotResult = null; + partials.clear(); + + do { + partialResult = partialScanner.next(); + partials.add(partialResult); + } while (partialResult != null && partialResult.isPartial()); + + completeResult = Result.createCompleteResult(partials); + oneShotResult = oneShotScanner.next(); + + compareResults(completeResult, oneShotResult, null); + } + + assertTrue(oneShotScanner.next() == null); + assertTrue(partialScanner.next() == null); + + oneShotScanner.close(); + partialScanner.close(); + } + + /** + * When reconstructing the complete result from its partials we ensure that the row of each + * partial result is the same. If one of the rows differs, an exception is thrown. + */ + @Test + public void testExceptionThrownOnMismatchedPartialResults() throws IOException { + assertTrue(NUM_ROWS >= 2); + + ArrayList partials = new ArrayList<>(); + Scan scan = new Scan(); + scan.setMaxResultSize(Long.MAX_VALUE); + ResultScanner scanner = TABLE.getScanner(scan); + Result r1 = scanner.next(); + partials.add(r1); + Result r2 = scanner.next(); + partials.add(r2); + + assertFalse(Bytes.equals(r1.getRow(), r2.getRow())); + + try { + Result.createCompleteResult(partials); + fail("r1 and r2 are from different rows. It should not be possible to combine them into" + + " a single result"); + } catch (IOException e) { + } + + scanner.close(); + } + + /** + * When a scan has a filter where {@link org.apache.hadoop.hbase.filter.Filter#hasFilterRow()} is + * true, the scanner should not return partial results. The scanner cannot return partial results + * because the entire row needs to be read for the include/exclude decision to be made + */ + @Test + public void testNoPartialResultsWhenRowFilterPresent() throws Exception { + Scan scan = new Scan(); + scan.setMaxResultSize(1); + scan.setAllowPartialResults(true); + // If a filter hasFilter() is true then partial results should not be returned else filter + // application server side would break. + scan.setFilter(new RandomRowFilter(1.0f)); + ResultScanner scanner = TABLE.getScanner(scan); + + Result r = null; + while ((r = scanner.next()) != null) { + assertFalse(r.isPartial()); + } + + scanner.close(); + } + + /** + * Examine the interaction between the maxResultSize and caching. If the caching limit is reached + * before the maxResultSize limit, we should not see partial results. On the other hand, if the + * maxResultSize limit is reached before the caching limit, it is likely that partial results will + * be seen. 
+   * @throws Exception
+   */
+  @Test
+  public void testPartialResultsAndCaching() throws Exception {
+    for (int caching = 1; caching <= NUM_ROWS; caching++) {
+      for (int maxResultRows = 0; maxResultRows <= NUM_ROWS; maxResultRows++) {
+        testPartialResultsAndCaching(maxResultRows, caching);
+      }
+    }
+  }
+
+  /**
+   * @param resultSizeRowLimit The row limit that will be enforced through maxResultSize
+   * @param cachingRowLimit The row limit that will be enforced through caching
+   * @throws Exception
+   */
+  public void testPartialResultsAndCaching(int resultSizeRowLimit, int cachingRowLimit)
+      throws Exception {
+    Scan scan = new Scan();
+    scan.setAllowPartialResults(true);
+
+    // The number of cells specified in the call to getResultSizeForNumberOfCells is offset to
+    // ensure that the result size we specify is not an exact multiple of the number of cells
+    // in a row. This ensures that partial results will be returned when the result size limit
+    // is reached before the caching limit.
+    int cellOffset = NUM_COLS / 3;
+    long maxResultSize = getResultSizeForNumberOfCells(resultSizeRowLimit * NUM_COLS + cellOffset);
+    scan.setMaxResultSize(maxResultSize);
+    scan.setCaching(cachingRowLimit);
+
+    ResultScanner scanner = TABLE.getScanner(scan);
+    ClientScanner clientScanner = (ClientScanner) scanner;
+    Result r = null;
+
+    // Approximate the number of rows we expect will fit into the specified max result size. If this
+    // approximation is less than caching, then we expect that the max result size limit will be
+    // hit before the caching limit and thus partial results may be seen
+    boolean expectToSeePartialResults = resultSizeRowLimit < cachingRowLimit;
+    while ((r = clientScanner.next()) != null) {
+      assertTrue(!r.isPartial() || expectToSeePartialResults);
+    }
+
+    scanner.close();
+  }
+
+  /**
+   * Small scans should not return partial results because it would prevent small scans from
+   * retrieving all of the necessary results in a single RPC request, which is what makes small
+   * scans useful.
Thus, ensure that even when {@link Scan#getAllowPartialResults()} is true, small + * scans do not return partial results + * @throws Exception + */ + @Test + public void testSmallScansDoNotAllowPartials() throws Exception { + Scan scan = new Scan(); + testSmallScansDoNotAllowPartials(scan); + scan.setReversed(true); + testSmallScansDoNotAllowPartials(scan); + } + + public void testSmallScansDoNotAllowPartials(Scan baseScan) throws Exception { + Scan scan = new Scan(baseScan); + scan.setAllowPartialResults(true); + scan.setSmall(true); + scan.setMaxResultSize(1); + + ResultScanner scanner = TABLE.getScanner(scan); + Result r = null; + + while ((r = scanner.next()) != null) { + assertFalse(r.isPartial()); + } + + scanner.close(); + } + + /** + * Make puts to put the input value into each combination of row, family, and qualifier + * @param rows + * @param families + * @param qualifiers + * @param value + * @return + * @throws IOException + */ + static ArrayList createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers, + byte[] value) throws IOException { + Put put; + ArrayList puts = new ArrayList<>(); + + for (int row = 0; row < rows.length; row++) { + put = new Put(rows[row]); + for (int fam = 0; fam < families.length; fam++) { + for (int qual = 0; qual < qualifiers.length; qual++) { + KeyValue kv = new KeyValue(rows[row], families[fam], qualifiers[qual], qual, value); + put.add(kv); + } + } + puts.add(put); + } + + return puts; + } + + /** + * Make key values to represent each possible combination of family and qualifier in the specified + * row. + * @param row + * @param families + * @param qualifiers + * @param value + * @return + */ + static ArrayList createKeyValuesForRow(byte[] row, byte[][] families, byte[][] qualifiers, + byte[] value) { + ArrayList outList = new ArrayList<>(); + for (int fam = 0; fam < families.length; fam++) { + for (int qual = 0; qual < qualifiers.length; qual++) { + outList.add(new KeyValue(row, families[fam], qualifiers[qual], qual, value)); + } + } + return outList; + } + + /** + * Verifies that result contains all the key values within expKvList. Fails the test otherwise + * @param result + * @param expKvList + * @param msg + */ + static void verifyResult(Result result, List expKvList, String msg) { + if (LOG.isInfoEnabled()) { + LOG.info(msg); + LOG.info("Expected count: " + expKvList.size()); + LOG.info("Actual count: " + result.size()); + } + + if (expKvList.size() == 0) return; + + int i = 0; + for (Cell kv : result.rawCells()) { + if (i >= expKvList.size()) { + break; // we will check the size later + } + + Cell kvExp = expKvList.get(i++); + assertTrue("Not equal. 
get kv: " + kv.toString() + " exp kv: " + kvExp.toString(), + kvExp.equals(kv)); + } + + assertEquals(expKvList.size(), result.size()); + } + + /** + * Compares two results and fails the test if the results are different + * @param r1 + * @param r2 + * @param message + */ + static void compareResults(Result r1, Result r2, final String message) { + if (LOG.isInfoEnabled()) { + if (message != null) LOG.info(message); + LOG.info("r1: " + r1); + LOG.info("r2: " + r2); + } + + final String failureMessage = "Results r1:" + r1 + " \nr2:" + r2 + " are not equivalent"; + if (r1 == null && r2 == null) fail(failureMessage); + else if (r1 == null || r2 == null) fail(failureMessage); + + try { + Result.compareResults(r1, r2); + } catch (Exception e) { + fail(failureMessage); + } + } + + @Test + public void testReadPointAndPartialResults() throws Exception { + TableName testName = TableName.valueOf("testReadPointAndPartialResults"); + int numRows = 5; + int numFamilies = 5; + int numQualifiers = 5; + byte[][] rows = HTestConst.makeNAscii(Bytes.toBytes("testRow"), numRows); + byte[][] families = HTestConst.makeNAscii(Bytes.toBytes("testFamily"), numFamilies); + byte[][] qualifiers = HTestConst.makeNAscii(Bytes.toBytes("testQualifier"), numQualifiers); + byte[] value = Bytes.createMaxByteArray(100); + + Table tmpTable = createTestTable(testName, rows, families, qualifiers, value); + + Scan scan = new Scan(); + scan.setMaxResultSize(1); + scan.setAllowPartialResults(true); + + // Open scanner before deletes + ResultScanner scanner = tmpTable.getScanner(scan); + + Delete delete1 = new Delete(rows[0]); + delete1.addColumn(families[0], qualifiers[0], 0); + tmpTable.delete(delete1); + + Delete delete2 = new Delete(rows[1]); + delete2.addColumn(families[1], qualifiers[1], 1); + tmpTable.delete(delete2); + + // Should see all cells because scanner was opened prior to deletes + int scannerCount = countCellsFromScanner(scanner); + int expectedCount = numRows * numFamilies * numQualifiers; + assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount, + scannerCount == expectedCount); + + // Minus 2 for the two cells that were deleted + scanner = tmpTable.getScanner(scan); + scannerCount = countCellsFromScanner(scanner); + expectedCount = numRows * numFamilies * numQualifiers - 2; + assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount, + scannerCount == expectedCount); + + scanner = tmpTable.getScanner(scan); + // Put in 2 new rows. The timestamps differ from the deleted rows + Put put1 = new Put(rows[0]); + put1.add(new KeyValue(rows[0], families[0], qualifiers[0], 1, value)); + tmpTable.put(put1); + + Put put2 = new Put(rows[1]); + put2.add(new KeyValue(rows[1], families[1], qualifiers[1], 2, value)); + tmpTable.put(put2); + + // Scanner opened prior to puts. Cell count shouldn't have changed + scannerCount = countCellsFromScanner(scanner); + expectedCount = numRows * numFamilies * numQualifiers - 2; + assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount, + scannerCount == expectedCount); + + // Now the scanner should see the cells that were added by puts + scanner = tmpTable.getScanner(scan); + scannerCount = countCellsFromScanner(scanner); + expectedCount = numRows * numFamilies * numQualifiers; + assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount, + scannerCount == expectedCount); + + TEST_UTIL.deleteTable(testName); + } + + /** + * Exhausts the scanner by calling next repetitively. 
Once completely exhausted, close scanner and + * return total cell count + * @param scanner + * @return + * @throws Exception + */ + private int countCellsFromScanner(ResultScanner scanner) throws Exception { + Result result = null; + int numCells = 0; + while ((result = scanner.next()) != null) { + numCells += result.rawCells().length; + } + + scanner.close(); + return numCells; + } + + /** + * Test partial Result re-assembly in the presence of different filters. The Results from the + * partial scanner should match the Results returned from a scanner that receives all of the + * results in one RPC to the server. The partial scanner is tested with a variety of different + * result sizes (all of which are less than the size necessary to fetch an entire row) + * @throws Exception + */ + @Test + public void testPartialResultsWithColumnFilter() throws Exception { + testPartialResultsWithColumnFilter(new FirstKeyOnlyFilter()); + testPartialResultsWithColumnFilter(new ColumnPrefixFilter(Bytes.toBytes("testQualifier5"))); + testPartialResultsWithColumnFilter(new ColumnRangeFilter(Bytes.toBytes("testQualifer1"), true, + Bytes.toBytes("testQualifier7"), true)); + + Set qualifiers = new LinkedHashSet<>(); + qualifiers.add(Bytes.toBytes("testQualifier5")); + testPartialResultsWithColumnFilter(new FirstKeyValueMatchingQualifiersFilter(qualifiers)); + } + + public void testPartialResultsWithColumnFilter(Filter filter) throws Exception { + assertTrue(!filter.hasFilterRow()); + + Scan partialScan = new Scan(); + partialScan.setFilter(filter); + + Scan oneshotScan = new Scan(); + oneshotScan.setFilter(filter); + oneshotScan.setMaxResultSize(Long.MAX_VALUE); + + for (int i = 1; i <= NUM_COLS; i++) { + partialScan.setMaxResultSize(getResultSizeForNumberOfCells(i)); + testEquivalenceOfScanResults(TABLE, partialScan, oneshotScan); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java index 30ad325da5e..23423fa1c3c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java @@ -38,13 +38,12 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.coordination.ZkSplitLogWorkerCoordination; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.LoadBalancer; import org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer; @@ -238,7 +237,7 @@ public class TestZooKeeper { MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); HMaster m = cluster.getMaster(); m.abort("Test recovery from zk session expired", - new KeeperException.SessionExpiredException()); + new KeeperException.SessionExpiredException()); assertTrue(m.isStopped()); // Master doesn't recover any more testSanity("testMasterZKSessionRecoveryFailure"); } @@ -285,9 +284,9 @@ public class TestZooKeeper { // make sure they aren't the same ZooKeeperWatcher z1 = - 
getZooKeeperWatcher(HConnectionManager.getConnection(localMeta.getConfiguration())); + getZooKeeperWatcher(ConnectionFactory.createConnection(localMeta.getConfiguration())); ZooKeeperWatcher z2 = - getZooKeeperWatcher(HConnectionManager.getConnection(otherConf)); + getZooKeeperWatcher(ConnectionFactory.createConnection(otherConf)); assertFalse(z1 == z2); assertFalse(z1.getQuorum().equals(z2.getQuorum())); @@ -347,8 +346,8 @@ public class TestZooKeeper { @Test public void testClusterKey() throws Exception { - testKey("server", "2181", "hbase"); - testKey("server1,server2,server3", "2181", "hbase"); + testKey("server", 2181, "hbase"); + testKey("server1,server2,server3", 2181, "hbase"); try { ZKUtil.transformClusterKey("2181:hbase"); } catch (IOException ex) { @@ -356,20 +355,58 @@ public class TestZooKeeper { } } - private void testKey(String ensemble, String port, String znode) + @Test + public void testClusterKeyWithMultiplePorts() throws Exception { + // server has different port than the default port + testKey("server1:2182", 2181, "hbase", true); + // multiple servers have their own port + testKey("server1:2182,server2:2183,server3:2184", 2181, "hbase", true); + // one server has no specified port, should use default port + testKey("server1:2182,server2,server3:2184", 2181, "hbase", true); + // the last server has no specified port, should use default port + testKey("server1:2182,server2:2183,server3", 2181, "hbase", true); + // multiple servers have no specified port, should use default port for those servers + testKey("server1:2182,server2,server3:2184,server4", 2181, "hbase", true); + // same server, different ports + testKey("server1:2182,server1:2183,server1", 2181, "hbase", true); + // mix of same server/different port and different server + testKey("server1:2182,server2:2183,server1", 2181, "hbase", true); + } + + private void testKey(String ensemble, int port, String znode) + throws IOException { + testKey(ensemble, port, znode, false); // not support multiple client ports + } + + private void testKey(String ensemble, int port, String znode, Boolean multiplePortSupport) throws IOException { Configuration conf = new Configuration(); String key = ensemble+":"+port+":"+znode; - String[] parts = ZKUtil.transformClusterKey(key); - assertEquals(ensemble, parts[0]); - assertEquals(port, parts[1]); - assertEquals(znode, parts[2]); + String ensemble2 = null; + ZKUtil.ZKClusterKey zkClusterKey = ZKUtil.transformClusterKey(key); + if (multiplePortSupport) { + ensemble2 = ZKUtil.standardizeQuorumServerString(ensemble, Integer.toString(port)); + assertEquals(ensemble2, zkClusterKey.quorumString); + } + else { + assertEquals(ensemble, zkClusterKey.quorumString); + } + assertEquals(port, zkClusterKey.clientPort); + assertEquals(znode, zkClusterKey.znodeParent); + ZKUtil.applyClusterKeyToConf(conf, key); - assertEquals(parts[0], conf.get(HConstants.ZOOKEEPER_QUORUM)); - assertEquals(parts[1], conf.get(HConstants.ZOOKEEPER_CLIENT_PORT)); - assertEquals(parts[2], conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT)); + assertEquals(zkClusterKey.quorumString, conf.get(HConstants.ZOOKEEPER_QUORUM)); + assertEquals(zkClusterKey.clientPort, conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, -1)); + assertEquals(zkClusterKey.znodeParent, conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT)); + String reconstructedKey = ZKUtil.getZooKeeperClusterKey(conf); - assertEquals(key, reconstructedKey); + if (multiplePortSupport) { + String key2 = ensemble2 + ":" + port + ":" + znode; + assertEquals(key2, reconstructedKey); + } + 
else { + assertEquals(key, reconstructedKey); + } } /** @@ -491,14 +528,12 @@ public class TestZooKeeper { cluster.startRegionServer(); cluster.waitForActiveAndReadyMaster(10000); HMaster m = cluster.getMaster(); - ZooKeeperWatcher zkw = m.getZooKeeper(); - int expectedNumOfListeners = zkw.getNumberOfListeners(); + final ZooKeeperWatcher zkw = m.getZooKeeper(); // now the cluster is up. So assign some regions. - Admin admin = TEST_UTIL.getHBaseAdmin(); - try { + try (Admin admin = TEST_UTIL.getHBaseAdmin()) { byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), - Bytes.toBytes("c"), Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"), - Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"), Bytes.toBytes("j") }; + Bytes.toBytes("c"), Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"), + Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"), Bytes.toBytes("j") }; String tableName = "testRegionAssignmentAfterMasterRecoveryDueToZKExpiry"; HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); htd.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)); @@ -506,8 +541,9 @@ public class TestZooKeeper { TEST_UTIL.waitUntilNoRegionsInTransition(60000); m.getZooKeeper().close(); MockLoadBalancer.retainAssignCalled = false; + final int expectedNumOfListeners = countPermanentListeners(zkw); m.abort("Test recovery from zk session expired", - new KeeperException.SessionExpiredException()); + new KeeperException.SessionExpiredException()); assertTrue(m.isStopped()); // Master doesn't recover any more // The recovered master should not call retainAssignment, as it is not a // clean startup. @@ -515,12 +551,39 @@ public class TestZooKeeper { // number of listeners should be same as the value before master aborted // wait for new master is initialized cluster.waitForActiveAndReadyMaster(120000); - assertEquals(expectedNumOfListeners, zkw.getNumberOfListeners()); - } finally { - admin.close(); + final HMaster newMaster = cluster.getMasterThread().getMaster(); + assertEquals(expectedNumOfListeners, countPermanentListeners(newMaster.getZooKeeper())); } } + /** + * Count listeners in zkw excluding listeners, that belongs to workers or other + * temporary processes. + */ + private int countPermanentListeners(ZooKeeperWatcher watcher) { + return countListeners(watcher, ZkSplitLogWorkerCoordination.class); + } + + /** + * Count listeners in zkw excluding provided classes + */ + private int countListeners(ZooKeeperWatcher watcher, Class... exclude) { + int cnt = 0; + for (Object o : watcher.getListeners()) { + boolean skip = false; + for (Class aClass : exclude) { + if (aClass.isAssignableFrom(o.getClass())) { + skip = true; + break; + } + } + if (!skip) { + cnt += 1; + } + } + return cnt; + } + /** * Tests whether the logs are split when master recovers from a expired zookeeper session and an * RS goes down. 
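The TestZooKeeper changes above assert how a cluster key behaves when individual quorum servers carry their own client ports. As a rough, self-contained sketch of the normalization those assertions expect (servers without an explicit port fall back to the default client port), the hypothetical helper below mirrors the behaviour exercised through ZKUtil.standardizeQuorumServerString; it is an illustration under that assumption, not the actual HBase implementation.

// Illustrative sketch only: standardizeQuorum is a hypothetical stand-in that reproduces the
// fallback-to-default-port rule asserted in testClusterKeyWithMultiplePorts.
public final class QuorumKeySketch {

  /** Append the default client port to every quorum entry that does not carry its own port. */
  static String standardizeQuorum(String ensemble, int defaultClientPort) {
    StringBuilder standardized = new StringBuilder();
    for (String server : ensemble.split(",")) {
      if (standardized.length() > 0) {
        standardized.append(',');
      }
      // "host" becomes "host:<defaultClientPort>"; "host:port" is kept unchanged.
      standardized.append(server.contains(":") ? server : server + ":" + defaultClientPort);
    }
    return standardized.toString();
  }

  public static void main(String[] args) {
    // Servers without an explicit port are expected to pick up the default port (2181 here).
    System.out.println(standardizeQuorum("server1:2182,server2,server3:2184", 2181));
    // expected output: server1:2182,server2:2181,server3:2184
  }
}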
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java index 903ce0ec2eb..3fc7594675c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java @@ -42,8 +42,8 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Bytes; @@ -231,7 +231,7 @@ public class TestHFileArchiving { List servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME); // make sure we only have 1 region serving this table assertEquals(1, servingRegions.size()); - HRegion region = servingRegions.get(0); + Region region = servingRegions.get(0); // get the parent RS and monitor HRegionServer hrs = UTIL.getRSForFirstRegionInTable(TABLE_NAME); @@ -242,7 +242,7 @@ public class TestHFileArchiving { UTIL.loadRegion(region, TEST_FAM); // get the hfiles in the region - List regions = hrs.getOnlineRegions(TABLE_NAME); + List regions = hrs.getOnlineRegions(TABLE_NAME); assertEquals("More that 1 region for test table.", 1, regions.size()); region = regions.get(0); @@ -257,7 +257,8 @@ public class TestHFileArchiving { clearArchiveDirectory(); // then get the current store files - List storeFiles = getRegionStoreFiles(region); + byte[][]columns = region.getTableDesc().getFamiliesKeys().toArray(new byte[0][]); + List storeFiles = region.getStoreFileList(columns); // then delete the table so the hfiles get archived UTIL.deleteTable(TABLE_NAME); @@ -310,7 +311,7 @@ public class TestHFileArchiving { List servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME); // make sure we only have 1 region serving this table assertEquals(1, servingRegions.size()); - HRegion region = servingRegions.get(0); + Region region = servingRegions.get(0); // get the parent RS and monitor HRegionServer hrs = UTIL.getRSForFirstRegionInTable(TABLE_NAME); @@ -321,7 +322,7 @@ public class TestHFileArchiving { UTIL.loadRegion(region, TEST_FAM); // get the hfiles in the region - List regions = hrs.getOnlineRegions(TABLE_NAME); + List regions = hrs.getOnlineRegions(TABLE_NAME); assertEquals("More that 1 region for test table.", 1, regions.size()); region = regions.get(0); @@ -336,7 +337,8 @@ public class TestHFileArchiving { clearArchiveDirectory(); // then get the current store files - List storeFiles = getRegionStoreFiles(region); + byte[][]columns = region.getTableDesc().getFamiliesKeys().toArray(new byte[0][]); + List storeFiles = region.getStoreFileList(columns); // then delete the table so the hfiles get archived UTIL.getHBaseAdmin().deleteColumn(TABLE_NAME, TEST_FAM); @@ -449,19 +451,4 @@ public class TestHFileArchiving { } return fileNames; } - - private List getRegionStoreFiles(final HRegion region) throws IOException { - Path regionDir = region.getRegionFileSystem().getRegionDir(); - FileSystem fs = region.getRegionFileSystem().getFileSystem(); - List storeFiles = getAllFileNames(fs, regionDir); - 
// remove all the non-storefile named files for the region - for (int i = 0; i < storeFiles.size(); i++) { - String file = storeFiles.get(i); - if (file.contains(HRegionFileSystem.REGION_INFO_FILE) || file.contains("wal")) { - storeFiles.remove(i--); - } - } - storeFiles.remove(HRegionFileSystem.REGION_INFO_FILE); - return storeFiles; - } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java index 772c345ccaa..eba3c0bd447 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; @@ -170,7 +170,7 @@ public class TestZooKeeperTableArchiveClient { // create the region HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM); - HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd); + Region region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd); loadFlushAndCompact(region, TEST_FAM); @@ -220,12 +220,12 @@ public class TestZooKeeperTableArchiveClient { // create the region HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM); - HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd); + Region region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd); loadFlushAndCompact(region, TEST_FAM); // create the another table that we don't archive hcd = new HColumnDescriptor(TEST_FAM); - HRegion otherRegion = UTIL.createTestRegion(otherTable, hcd); + Region otherRegion = UTIL.createTestRegion(otherTable, hcd); loadFlushAndCompact(otherRegion, TEST_FAM); // get the current hfiles in the archive directory @@ -379,7 +379,7 @@ public class TestZooKeeperTableArchiveClient { return allFiles; } - private void loadFlushAndCompact(HRegion region, byte[] family) throws IOException { + private void loadFlushAndCompact(Region region, byte[] family) throws IOException { // create two hfiles in the region createHFileInRegion(region, family); createHFileInRegion(region, family); @@ -391,7 +391,7 @@ public class TestZooKeeperTableArchiveClient { // compact the two files into one file to get files in the archive LOG.debug("Compacting stores"); - region.compactStores(true); + region.compact(true); } /** @@ -400,13 +400,13 @@ public class TestZooKeeperTableArchiveClient { * @param columnFamily family for which to add data * @throws IOException */ - private void createHFileInRegion(HRegion region, byte[] columnFamily) throws IOException { + private void createHFileInRegion(Region region, byte[] columnFamily) throws IOException { // put one row in the region Put p = new Put(Bytes.toBytes("row")); p.add(columnFamily, Bytes.toBytes("Qual"), Bytes.toBytes("v1")); region.put(p); // flush the region to make a store file - region.flushcache(); + region.flush(true); } /** diff --git 
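Note: both archiving test files above follow the same migration pattern: hold the Region interface rather than the concrete HRegion, call flush(true)/compact(true) instead of flushcache()/compactStores(true), and ask the region itself for its store files instead of walking the region directory. A minimal sketch of that pattern, assuming the imports already used by these tests (Region, Put, Bytes, List) are in scope; the method name is illustrative only:

    // Sketch of the pattern the tests above migrate to (not code from the patch itself).
    static List<String> writeFlushAndListStoreFiles(Region region, byte[] family) throws IOException {
      Put p = new Put(Bytes.toBytes("row"));
      p.add(family, Bytes.toBytes("q"), Bytes.toBytes("v"));
      region.put(p);
      region.flush(true);                        // was region.flushcache()
      region.compact(true);                      // was region.compactStores(true)
      byte[][] families = region.getTableDesc().getFamiliesKeys().toArray(new byte[0][]);
      return region.getStoreFileList(families);  // replaces the removed getRegionStoreFiles() helper
    }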
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java index 998cdf0e49f..06fdd7f0bc8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.client.ConnectionManager.HConnectionImplementation; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.mockito.Mockito; @@ -39,41 +38,31 @@ public class HConnectionTestingUtility { /* * Not part of {@link HBaseTestingUtility} because this class is not * in same package as {@link HConnection}. Would have to reveal ugly - * {@link HConnectionManager} innards to HBaseTestingUtility to give it access. + * {@link ConnectionImplementation} innards to HBaseTestingUtility to give it access. */ /** * Get a Mocked {@link HConnection} that goes with the passed conf * configuration instance. Minimally the mock will return * conf when {@link ClusterConnection#getConfiguration()} is invoked. * Be sure to shutdown the connection when done by calling - * {@link HConnectionManager#deleteConnection(Configuration)} else it - * will stick around; this is probably not what you want. + * {@link Connection#close()} else it will stick around; this is probably not what you want. * @param conf configuration * @return HConnection object for conf * @throws ZooKeeperConnectionException */ public static ClusterConnection getMockedConnection(final Configuration conf) throws ZooKeeperConnectionException { - HConnectionKey connectionKey = new HConnectionKey(conf); - synchronized (ConnectionManager.CONNECTION_INSTANCES) { - HConnectionImplementation connection = - ConnectionManager.CONNECTION_INSTANCES.get(connectionKey); - if (connection == null) { - connection = Mockito.mock(HConnectionImplementation.class); - Mockito.when(connection.getConfiguration()).thenReturn(conf); - ConnectionManager.CONNECTION_INSTANCES.put(connectionKey, connection); - } - return connection; - } + ConnectionImplementation connection = Mockito.mock(ConnectionImplementation.class); + Mockito.when(connection.getConfiguration()).thenReturn(conf); + return connection; } /** * Calls {@link #getMockedConnection(Configuration)} and then mocks a few * more of the popular {@link ClusterConnection} methods so they do 'normal' * operation (see return doc below for list). Be sure to shutdown the - * connection when done by calling - * {@link HConnectionManager#deleteConnection(Configuration)} else it - * will stick around; this is probably not what you want. + * connection when done by calling {@link Connection#close()} else it will stick around; + * this is probably not what you want. * * @param conf Configuration to use * @param admin An AdminProtocol; can be null but is usually @@ -92,8 +81,7 @@ public class HConnectionTestingUtility { * {@link ClusterConnection#getAdmin(ServerName)} is called, returns the passed * {@link ClientProtos.ClientService.BlockingInterface} instance when * {@link ClusterConnection#getClient(ServerName)} is called (Be sure to call - * {@link HConnectionManager#deleteConnection(Configuration)} - * when done with this mocked Connection. 
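Note: with the connection cache gone, the utility above simply hands back a Mockito mock and leaves the lifecycle to the caller; per the updated javadoc, the test must close the connection itself. A hedged usage sketch (the try/finally shape is illustrative, not taken from a specific test in this patch):

    Configuration conf = HBaseConfiguration.create();
    ClusterConnection conn = HConnectionTestingUtility.getMockedConnection(conf);
    try {
      // only getConfiguration() is stubbed on the plain mock
      assertSame(conf, conn.getConfiguration());
    } finally {
      conn.close();  // no shared registry cleans this up any more
    }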
+ * {@link Connection#close()} when done with this mocked Connection. * @throws IOException */ public static ClusterConnection getMockedConnectionAndDecorate(final Configuration conf, @@ -101,9 +89,8 @@ public class HConnectionTestingUtility { final ClientProtos.ClientService.BlockingInterface client, final ServerName sn, final HRegionInfo hri) throws IOException { - HConnectionImplementation c = Mockito.mock(HConnectionImplementation.class); + ConnectionImplementation c = Mockito.mock(ConnectionImplementation.class); Mockito.when(c.getConfiguration()).thenReturn(conf); - ConnectionManager.CONNECTION_INSTANCES.put(new HConnectionKey(conf), c); Mockito.doNothing().when(c).close(); // Make it so we return a particular location when asked. final HRegionLocation loc = new HRegionLocation(hri, sn); @@ -146,8 +133,7 @@ public class HConnectionTestingUtility { * Get a Mockito spied-upon {@link ClusterConnection} that goes with the passed * conf configuration instance. * Be sure to shutdown the connection when done by calling - * {@link HConnectionManager#deleteConnection(Configuration)} else it - * will stick around; this is probably not what you want. + * {@link Connection#close()} else it will stick around; this is probably not what you want. * @param conf configuration * @return HConnection object for conf * @throws ZooKeeperConnectionException @@ -156,38 +142,8 @@ public class HConnectionTestingUtility { */ public static ClusterConnection getSpiedConnection(final Configuration conf) throws IOException { - HConnectionKey connectionKey = new HConnectionKey(conf); - synchronized (ConnectionManager.CONNECTION_INSTANCES) { - HConnectionImplementation connection = - ConnectionManager.CONNECTION_INSTANCES.get(connectionKey); - if (connection == null) { - connection = Mockito.spy(new HConnectionImplementation(conf, true)); - ConnectionManager.CONNECTION_INSTANCES.put(connectionKey, connection); - } - return connection; - } - } - - public static ClusterConnection getSpiedClusterConnection(final Configuration conf) - throws IOException { - HConnectionKey connectionKey = new HConnectionKey(conf); - synchronized (ConnectionManager.CONNECTION_INSTANCES) { - HConnectionImplementation connection = - ConnectionManager.CONNECTION_INSTANCES.get(connectionKey); - if (connection == null) { - connection = Mockito.spy(new HConnectionImplementation(conf, true)); - ConnectionManager.CONNECTION_INSTANCES.put(connectionKey, connection); - } - return connection; - } - } - - /** - * @return Count of extant connection instances - */ - public static int getConnectionCount() { - synchronized (ConnectionManager.CONNECTION_INSTANCES) { - return ConnectionManager.CONNECTION_INSTANCES.size(); - } + ConnectionImplementation connection = + Mockito.spy(new ConnectionImplementation(conf, null, null)); + return connection; } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java index a352c4e7a8f..a1da4406edc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java @@ -24,7 +24,6 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.IOException; -import java.util.ArrayList; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; @@ -53,6 +52,7 @@ import org.apache.hadoop.hbase.master.HMaster; import 
org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; @@ -60,7 +60,6 @@ import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.DefaultWALProvider; import org.junit.After; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; @@ -545,11 +544,8 @@ public class TestAdmin2 { + DefaultWALProvider.getNumRolledLogFiles(regionServer.getWAL(null)) + " log files"); // flush all regions - - List regions = new ArrayList(regionServer - .getOnlineRegionsLocalContext()); - for (HRegion r : regions) { - r.flushcache(); + for (Region r : regionServer.getOnlineRegionsLocalContext()) { + r.flush(true); } admin.rollWALWriter(regionServer.getServerName()); int count = DefaultWALProvider.getNumRolledLogFiles(regionServer.getWAL(null)); @@ -632,20 +628,6 @@ public class TestAdmin2 { return regionServer; } - /** - * HBASE-4417 checkHBaseAvailable() doesn't close zk connections - */ - @Test (timeout=300000) - public void testCheckHBaseAvailableClosesConnection() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); - - int initialCount = HConnectionTestingUtility.getConnectionCount(); - HBaseAdmin.checkHBaseAvailable(conf); - int finalCount = HConnectionTestingUtility.getConnectionCount(); - - Assert.assertEquals(initialCount, finalCount) ; - } - /** * Check that we have an exception if the cluster is not there. */ @@ -657,8 +639,6 @@ public class TestAdmin2 { conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 9999)+10); - int initialCount = HConnectionTestingUtility.getConnectionCount(); - long start = System.currentTimeMillis(); try { HBaseAdmin.checkHBaseAvailable(conf); @@ -670,10 +650,6 @@ public class TestAdmin2 { } long end = System.currentTimeMillis(); - int finalCount = HConnectionTestingUtility.getConnectionCount(); - - Assert.assertEquals(initialCount, finalCount) ; - LOG.info("It took "+(end-start)+" ms to find out that" + " HBase was not available"); } @@ -726,4 +702,26 @@ public class TestAdmin2 { pair = rawAdmin.getRegion(region.getEncodedNameAsBytes()); assertTrue(Bytes.equals(regionName, pair.getFirst().getRegionName())); } + + @Test(timeout = 30000) + public void testBalancer() throws Exception { + boolean initialState = admin.isBalancerEnabled(); + + // Start the balancer, wait for it. 
+ boolean prevState = admin.setBalancerRunning(!initialState, true); + + // The previous state should be the original state we observed + assertEquals(initialState, prevState); + + // Current state should be opposite of the original + assertEquals(!initialState, admin.isBalancerEnabled()); + + // Reset it back to what it was + prevState = admin.setBalancerRunning(initialState, true); + + // The previous state should be the opposite of the initial state + assertEquals(!initialState, prevState); + // Current state should be the original state again + assertEquals(initialState, admin.isBalancerEnabled()); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java index a8c4abd522f..ae8a4492a5b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.filter.CompareFilter; +import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.AfterClass; @@ -29,6 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; @Category(MediumTests.class) public class TestCheckAndMutate { @@ -96,6 +98,18 @@ public class TestCheckAndMutate { Bytes.toString(result.getValue(family, Bytes.toBytes("B"))).equals("b")); assertTrue("Column C should not exist", result.getValue(family, Bytes.toBytes("C")) == null); + + //Test that we get a region level exception + try { + Put p = new Put(rowKey); + p.add(new byte[]{'b', 'o', 'g', 'u', 's'}, new byte[]{'A'}, new byte[0]); + rm = new RowMutations(rowKey); + rm.add(p); + table.checkAndMutate(rowKey, family, Bytes.toBytes("A"), CompareFilter.CompareOp.EQUAL, + Bytes.toBytes("a"), rm); + fail("Expected NoSuchColumnFamilyException"); + } catch(NoSuchColumnFamilyException e) { + } } finally { table.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java index 82f62e44cfb..953f64159c7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.client.backoff.ServerStatistics; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -91,7 +92,7 @@ public class TestClientPushback { HTable table = (HTable) conn.getTable(tablename); HRegionServer rs = UTIL.getHBaseCluster().getRegionServer(0); - HRegion region = rs.getOnlineRegions(tablename).get(0); + Region region = rs.getOnlineRegions(tablename).get(0); LOG.debug("Writing some data to "+tablename); // write some data @@ -101,7 +102,7 @@ 
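Note: the new testBalancer case in TestAdmin2 above leans on setBalancerRunning(newState, synchronous) returning the state it replaced; condensed, the contract it exercises is roughly the following, assuming an Admin named admin:

    boolean before   = admin.isBalancerEnabled();
    boolean previous = admin.setBalancerRunning(!before, true);  // flip it, synchronously
    assertEquals(before, previous);                              // return value is the old state
    admin.setBalancerRunning(before, true);                      // restore the original setting
    assertEquals(before, admin.isBalancerEnabled());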
public class TestClientPushback { table.flushCommits(); // get the current load on RS. Hopefully memstore isn't flushed since we wrote the the data - int load = (int)((region.addAndGetGlobalMemstoreSize(0) * 100) / flushSizeBytes); + int load = (int)((((HRegion)region).addAndGetGlobalMemstoreSize(0) * 100) / flushSizeBytes); LOG.debug("Done writing some data to "+tablename); // get the stats for the region hosting our table @@ -114,7 +115,7 @@ public class TestClientPushback { assertNotNull( "No stats configured for the client!", stats); // get the names so we can query the stats ServerName server = rs.getServerName(); - byte[] regionName = region.getRegionName(); + byte[] regionName = region.getRegionInfo().getRegionName(); // check to see we found some load on the memstore ServerStatistics serverStats = stats.getServerStatsForTesting(server); @@ -125,8 +126,8 @@ public class TestClientPushback { // check that the load reported produces a nonzero delay long backoffTime = backoffPolicy.getBackoffTime(server, regionName, serverStats); assertNotEquals("Reported load does not produce a backoff", backoffTime, 0); - LOG.debug("Backoff calculated for " + region.getRegionNameAsString() + " @ " + server + - " is " + backoffTime); + LOG.debug("Backoff calculated for " + region.getRegionInfo().getRegionNameAsString() + " @ " + + server + " is " + backoffTime); // Reach into the connection and submit work directly to AsyncProcess so we can // monitor how long the submission was delayed via a callback diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index c77ab2978a8..e337ce25492 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -43,6 +43,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicReference; +import org.apache.log4j.Level; import org.apache.commons.lang.ArrayUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -82,6 +83,7 @@ import org.apache.hadoop.hbase.filter.WhileMatchFilter; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto; @@ -91,6 +93,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateR import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -98,6 +101,9 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.log4j.AppenderSkeleton; +import org.apache.log4j.Logger; +import 
org.apache.log4j.spi.LoggingEvent; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -285,96 +291,6 @@ public class TestFromClientSide { table.close(); } - /** - * @deprecated Tests deprecated functionality. Remove when we are past 1.0. - * @throws Exception - */ - @Deprecated - @Test - public void testSharedZooKeeper() throws Exception { - Configuration newConfig = new Configuration(TEST_UTIL.getConfiguration()); - newConfig.set(HConstants.HBASE_CLIENT_INSTANCE_ID, "12345"); - - // First with a simple ZKW - ZooKeeperWatcher z0 = new ZooKeeperWatcher( - newConfig, "hconnection", new Abortable() { - @Override public void abort(String why, Throwable e) {} - @Override public boolean isAborted() {return false;} - }); - z0.getRecoverableZooKeeper().getZooKeeper().exists("/oldZooKeeperWatcher", false); - z0.close(); - - // Then a ZooKeeperKeepAliveConnection - ConnectionManager.HConnectionImplementation connection1 = - (ConnectionManager.HConnectionImplementation) - HConnectionManager.getConnection(newConfig); - - ZooKeeperKeepAliveConnection z1 = connection1.getKeepAliveZooKeeperWatcher(); - z1.getRecoverableZooKeeper().getZooKeeper().exists("/z1", false); - - z1.close(); - - // will still work, because the real connection is not closed yet - // Not do be done in real code - z1.getRecoverableZooKeeper().getZooKeeper().exists("/z1afterclose", false); - - - ZooKeeperKeepAliveConnection z2 = connection1.getKeepAliveZooKeeperWatcher(); - assertTrue( - "ZooKeeperKeepAliveConnection equals on same connection", z1 == z2); - - - - Configuration newConfig2 = new Configuration(TEST_UTIL.getConfiguration()); - newConfig2.set(HConstants.HBASE_CLIENT_INSTANCE_ID, "6789"); - ConnectionManager.HConnectionImplementation connection2 = - (ConnectionManager.HConnectionImplementation) - HConnectionManager.getConnection(newConfig2); - - assertTrue("connections should be different ", connection1 != connection2); - - ZooKeeperKeepAliveConnection z3 = connection2.getKeepAliveZooKeeperWatcher(); - assertTrue( - "ZooKeeperKeepAliveConnection should be different" + - " on different connections", z1 != z3); - - // Bypass the private access - Method m = ConnectionManager.HConnectionImplementation.class. - getDeclaredMethod("closeZooKeeperWatcher"); - m.setAccessible(true); - m.invoke(connection2); - - ZooKeeperKeepAliveConnection z4 = connection2.getKeepAliveZooKeeperWatcher(); - assertTrue( - "ZooKeeperKeepAliveConnection should be recreated" + - " when previous connections was closed" - , z3 != z4); - - - z2.getRecoverableZooKeeper().getZooKeeper().exists("/z2", false); - z4.getRecoverableZooKeeper().getZooKeeper().exists("/z4", false); - - - HConnectionManager.deleteConnection(newConfig); - try { - z2.getRecoverableZooKeeper().getZooKeeper().exists("/z2", false); - assertTrue("We should not have a valid connection for z2", false); - } catch (Exception e){ - } - - z4.getRecoverableZooKeeper().getZooKeeper().exists("/z4", false); - // We expect success here. - - - HConnectionManager.deleteConnection(newConfig2); - try { - z4.getRecoverableZooKeeper().getZooKeeper().exists("/z4", false); - assertTrue("We should not have a valid connection for z4", false); - } catch (Exception e){ - } - } - - /** * Verifies that getConfiguration returns the same Configuration object used * to create the HTable instance. 
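Note: the TestClientPushback hunks above show the working split between the public Region interface and the internal HRegion class: region identity now goes through getRegionInfo(), while test-only internals such as addAndGetGlobalMemstoreSize still need a cast. Roughly, with rs and tablename as in that test:

    Region region = rs.getOnlineRegions(tablename).get(0);
    byte[] regionName = region.getRegionInfo().getRegionName();             // was region.getRegionName()
    String printable  = region.getRegionInfo().getRegionNameAsString();     // was region.getRegionNameAsString()
    long memstoreSize = ((HRegion) region).addAndGetGlobalMemstoreSize(0);  // internal API, hence the cast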
@@ -433,11 +349,6 @@ public class TestFromClientSide { 0, getNumberOfRows(keyPrefix2, value2, table)); assertEquals("Got back incorrect number of rows from scan: " + keyPrefix3, 0, getNumberOfRows(keyPrefix3, value2, table)); - ht.setScannerCaching(0); - assertEquals("Got back incorrect number of rows from scan", 0, - getNumberOfRows(keyPrefix1, value2, table)); ht.setScannerCaching(100); - assertEquals("Got back incorrect number of rows from scan", 0, - getNumberOfRows(keyPrefix2, value2, table)); } private void deleteColumns(Table ht, String value, String keyPrefix) @@ -4127,7 +4038,7 @@ public class TestFromClientSide { */ HTable createUnmangedHConnectionHTable(final TableName tableName) throws IOException { TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY); - HConnection conn = HConnectionManager.createConnection(TEST_UTIL.getConfiguration()); + Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); return (HTable)conn.getTable(tableName); } @@ -4275,7 +4186,7 @@ public class TestFromClientSide { // set block size to 64 to making 2 kvs into one block, bypassing the walkForwardInSingleRow // in Store.rowAtOrBeforeFromStoreFile String regionName = table.getRegionLocations().firstKey().getEncodedName(); - HRegion region = + Region region = TEST_UTIL.getRSForFirstRegionInTable(tableAname).getFromOnlineRegions(regionName); Put put1 = new Put(firstRow); Put put2 = new Put(secondRow); @@ -4294,7 +4205,7 @@ public class TestFromClientSide { table.put(put2); table.put(put3); table.put(put4); - region.flushcache(); + region.flush(true); Result result = null; // Test before first that null is returned @@ -4432,6 +4343,17 @@ public class TestFromClientSide { r = t.get(g); assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[1]))); assertNull(r.getValue(FAMILY, QUALIFIERS[0])); + + //Test that we get a region level exception + try { + arm = new RowMutations(ROW); + p = new Put(ROW); + p.add(new byte[]{'b', 'o', 'g', 'u', 's'}, QUALIFIERS[0], VALUE); + arm.add(p); + t.mutateRow(arm); + fail("Expected NoSuchColumnFamilyException"); + } catch(NoSuchColumnFamilyException e) { + } } @Test @@ -4606,6 +4528,49 @@ public class TestFromClientSide { assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 2); } + @Test + public void testIncrementOnSameColumn() throws Exception { + LOG.info("Starting testIncrementOnSameColumn"); + final byte[] TABLENAME = Bytes.toBytes("testIncrementOnSameColumn"); + HTable ht = TEST_UTIL.createTable(TABLENAME, FAMILY); + + byte[][] QUALIFIERS = + new byte[][] { Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C") }; + + Increment inc = new Increment(ROW); + for (int i = 0; i < QUALIFIERS.length; i++) { + inc.addColumn(FAMILY, QUALIFIERS[i], 1); + inc.addColumn(FAMILY, QUALIFIERS[i], 1); + } + ht.increment(inc); + + // Verify expected results + Result r = ht.get(new Get(ROW)); + Cell[] kvs = r.rawCells(); + assertEquals(3, kvs.length); + assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[0], 1); + assertIncrementKey(kvs[1], ROW, FAMILY, QUALIFIERS[1], 1); + assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 1); + + // Now try multiple columns again + inc = new Increment(ROW); + for (int i = 0; i < QUALIFIERS.length; i++) { + inc.addColumn(FAMILY, QUALIFIERS[i], 1); + inc.addColumn(FAMILY, QUALIFIERS[i], 1); + } + ht.increment(inc); + + // Verify + r = ht.get(new Get(ROW)); + kvs = r.rawCells(); + assertEquals(3, kvs.length); + assertIncrementKey(kvs[0], ROW, FAMILY, QUALIFIERS[0], 2); + assertIncrementKey(kvs[1], 
ROW, FAMILY, QUALIFIERS[1], 2); + assertIncrementKey(kvs[2], ROW, FAMILY, QUALIFIERS[2], 2); + + ht.close(); + } + @Test public void testIncrement() throws Exception { LOG.info("Starting testIncrement"); @@ -5003,58 +4968,62 @@ public class TestFromClientSide { Scan scan1 = new Scan(); int numRecords = 0; - for(Result result : ht.getScanner(scan1)) { + ResultScanner scanner = ht.getScanner(scan1); + for(Result result : scanner) { numRecords++; } + scanner.close(); LOG.info("test data has " + numRecords + " records."); // by default, scan metrics collection is turned off - assertEquals(null, scan1.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA)); + assertEquals(null, scan1.getScanMetrics()); // turn on scan metrics - Scan scan = new Scan(); - scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE)); - scan.setCaching(numRecords+1); - ResultScanner scanner = ht.getScanner(scan); + Scan scan2 = new Scan(); + scan2.setScanMetricsEnabled(true); + scan2.setCaching(numRecords+1); + scanner = ht.getScanner(scan2); for (Result result : scanner.next(numRecords - 1)) { } scanner.close(); // closing the scanner will set the metrics. - assertNotNull(scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA)); + assertNotNull(scan2.getScanMetrics()); - // set caching to 1, becasue metrics are collected in each roundtrip only - scan = new Scan(); - scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE)); - scan.setCaching(1); - scanner = ht.getScanner(scan); + // set caching to 1, because metrics are collected in each roundtrip only + scan2 = new Scan(); + scan2.setScanMetricsEnabled(true); + scan2.setCaching(1); + scanner = ht.getScanner(scan2); // per HBASE-5717, this should still collect even if you don't run all the way to // the end of the scanner. So this is asking for 2 of the 3 rows we inserted. for (Result result : scanner.next(numRecords - 1)) { } scanner.close(); - ScanMetrics scanMetrics = getScanMetrics(scan); + ScanMetrics scanMetrics = scan2.getScanMetrics(); assertEquals("Did not access all the regions in the table", numOfRegions, scanMetrics.countOfRegions.get()); // now, test that the metrics are still collected even if you don't call close, but do // run past the end of all the records + /** There seems to be a timing issue here. Comment out for now. Fix when time. 
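Note: the new testIncrementOnSameColumn above pins down what happens when the same column is added to one Increment twice: the duplicate is collapsed, so the counter moves by one per increment() call, not two. In short, with ROW, FAMILY, and table as in that test:

    Increment inc = new Increment(ROW);
    inc.addColumn(FAMILY, Bytes.toBytes("A"), 1);
    inc.addColumn(FAMILY, Bytes.toBytes("A"), 1);  // duplicate of the same column
    table.increment(inc);
    // the test above asserts the stored value for column A is now 1, not 2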
Scan scanWithoutClose = new Scan(); scanWithoutClose.setCaching(1); - scanWithoutClose.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE)); + scanWithoutClose.setScanMetricsEnabled(true); ResultScanner scannerWithoutClose = ht.getScanner(scanWithoutClose); for (Result result : scannerWithoutClose.next(numRecords + 1)) { } ScanMetrics scanMetricsWithoutClose = getScanMetrics(scanWithoutClose); assertEquals("Did not access all the regions in the table", numOfRegions, scanMetricsWithoutClose.countOfRegions.get()); + */ // finally, test that the metrics are collected correctly if you both run past all the records, // AND close the scanner Scan scanWithClose = new Scan(); // make sure we can set caching up to the number of a scanned values scanWithClose.setCaching(numRecords); - scanWithClose.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.TRUE)); + scanWithClose.setScanMetricsEnabled(true); ResultScanner scannerWithClose = ht.getScanner(scanWithClose); for (Result result : scannerWithClose.next(numRecords + 1)) { } @@ -5068,7 +5037,6 @@ public class TestFromClientSide { byte[] serializedMetrics = scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA); assertTrue("Serialized metrics were not found.", serializedMetrics != null); - ScanMetrics scanMetrics = ProtobufUtil.toScanMetrics(serializedMetrics); return scanMetrics; @@ -5089,8 +5057,9 @@ public class TestFromClientSide { HTable table = TEST_UTIL.createTable(tableName, FAMILY); // get the block cache and region String regionName = table.getRegionLocations().firstKey().getEncodedName(); - HRegion region = TEST_UTIL.getRSForFirstRegionInTable(tableName).getFromOnlineRegions(regionName); - Store store = region.getStores().values().iterator().next(); + Region region = TEST_UTIL.getRSForFirstRegionInTable(tableName) + .getFromOnlineRegions(regionName); + Store store = region.getStores().iterator().next(); CacheConfig cacheConf = store.getCacheConfig(); cacheConf.setCacheDataOnWrite(true); cacheConf.setEvictOnClose(true); @@ -5125,7 +5094,7 @@ public class TestFromClientSide { assertEquals(startBlockMiss, cache.getStats().getMissCount()); // flush the data System.out.println("Flushing cache"); - region.flushcache(); + region.flush(true); // expect one more block in cache, no change in hits/misses long expectedBlockCount = startBlockCount + 1; long expectedBlockHits = startBlockHits; @@ -5152,7 +5121,7 @@ public class TestFromClientSide { assertEquals(expectedBlockMiss, cache.getStats().getMissCount()); // flush, one new block System.out.println("Flushing cache"); - region.flushcache(); + region.flush(true); assertEquals(++expectedBlockCount, cache.getBlockCount()); assertEquals(expectedBlockHits, cache.getStats().getHitCount()); assertEquals(expectedBlockMiss, cache.getStats().getMissCount()); @@ -5160,7 +5129,7 @@ public class TestFromClientSide { System.out.println("Compacting"); assertEquals(2, store.getStorefilesCount()); store.triggerMajorCompaction(); - region.compactStores(); + region.compact(true); waitForStoreFileCount(store, 1, 10000); // wait 10 seconds max assertEquals(1, store.getStorefilesCount()); expectedBlockCount -= 2; // evicted two blocks, cached none @@ -5209,10 +5178,10 @@ public class TestFromClientSide { // Verify region location before move. 
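Note: the scan-metrics hunks above replace the raw SCAN_ATTRIBUTES_METRICS_ENABLE attribute with typed accessors on Scan. A minimal sketch, assuming a populated Table named ht; per the comment in the patch, the metrics are filled in once the scanner is closed:

    Scan scan = new Scan();
    scan.setScanMetricsEnabled(true);  // was setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, ...)
    scan.setCaching(1);                // metrics accumulate per round trip
    try (ResultScanner scanner = ht.getScanner(scan)) {
      for (Result r : scanner) {
        // drain the scanner
      }
    }
    ScanMetrics metrics = scan.getScanMetrics();        // populated once the scanner is closed
    long regionsScanned = metrics.countOfRegions.get();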
HRegionLocation addrCache = table.getRegionLocation(regionInfo.getStartKey(), false); HRegionLocation addrNoCache = table.getRegionLocation(regionInfo.getStartKey(), true); - + assertEquals(addrBefore.getPort(), addrCache.getPort()); assertEquals(addrBefore.getPort(), addrNoCache.getPort()); - + ServerName addrAfter = null; // Now move the region to a different server. for (int i = 0; i < SLAVES; i++) { @@ -5227,7 +5196,7 @@ public class TestFromClientSide { break; } } - + // Verify the region was moved. addrCache = table.getRegionLocation(regionInfo.getStartKey(), false); addrNoCache = table.getRegionLocation(regionInfo.getStartKey(), true); @@ -5474,8 +5443,44 @@ public class TestFromClientSide { // check the conf settings to disable sanity checks htd.setMemStoreFlushSize(0); + + // Check that logs warn on invalid table but allow it. + ListAppender listAppender = new ListAppender(); + Logger log = Logger.getLogger(HMaster.class); + log.addAppender(listAppender); + log.setLevel(Level.WARN); + htd.setConfiguration("hbase.table.sanity.checks", Boolean.FALSE.toString()); checkTableIsLegal(htd); + + assertFalse(listAppender.getMessages().isEmpty()); + assertTrue(listAppender.getMessages().get(0).startsWith("MEMSTORE_FLUSHSIZE for table " + + "descriptor or \"hbase.hregion.memstore.flush.size\" (0) is too small, which might " + + "cause very frequent flushing.")); + + log.removeAppender(listAppender); + } + + private static class ListAppender extends AppenderSkeleton { + private final List messages = new ArrayList(); + + @Override + protected void append(LoggingEvent event) { + messages.add(event.getMessage().toString()); + } + + @Override + public void close() { + } + + @Override + public boolean requiresLayout() { + return false; + } + + public List getMessages() { + return messages; + } } private void checkTableIsLegal(HTableDescriptor htd) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java index 219496ffc52..9ed5be6d066 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java @@ -23,16 +23,12 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; import java.io.IOException; import java.lang.reflect.Field; import java.lang.reflect.Modifier; import java.net.SocketTimeoutException; -import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.Random; import java.util.concurrent.ExecutorService; import java.util.concurrent.SynchronousQueue; @@ -47,7 +43,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; @@ -56,7 +51,6 @@ import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; -import org.apache.hadoop.hbase.client.ConnectionManager.HConnectionImplementation; import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver; import 
org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; @@ -66,8 +60,8 @@ import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -75,8 +69,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -import org.jboss.netty.util.internal.DetectionUtil; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -108,8 +100,6 @@ public class TestHCM { private static final byte[] ROW_X = Bytes.toBytes("xxx"); private static Random _randy = new Random(); - private static boolean isJavaOk = DetectionUtil.javaVersion() > 6; - /** * This copro sleeps 20 second. The first call it fails. The second time, it works. */ @@ -131,11 +121,7 @@ public class TestHCM { @BeforeClass public static void setUpBeforeClass() throws Exception { - TEST_UTIL.getConfiguration().setBoolean(HConstants.STATUS_PUBLISHED, - HConstants.STATUS_PUBLISHED_DEFAULT); - if (isJavaOk) { - TEST_UTIL.getConfiguration().setBoolean(HConstants.STATUS_PUBLISHED, true); - } + TEST_UTIL.getConfiguration().setBoolean(HConstants.STATUS_PUBLISHED, true); TEST_UTIL.startMiniCluster(2); } @@ -143,11 +129,6 @@ public class TestHCM { TEST_UTIL.shutdownMiniCluster(); } - - private static int getHConnectionManagerCacheSize(){ - return HConnectionTestingUtility.getConnectionCount(); - } - @Test public void testClusterConnection() throws IOException { ThreadPoolExecutor otherPool = new ThreadPoolExecutor(1, 1, @@ -155,26 +136,26 @@ public class TestHCM { new SynchronousQueue(), Threads.newDaemonThreadFactory("test-hcm")); - HConnection con1 = HConnectionManager.createConnection(TEST_UTIL.getConfiguration()); - HConnection con2 = HConnectionManager.createConnection(TEST_UTIL.getConfiguration(), otherPool); + Connection con1 = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + Connection con2 = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration(), otherPool); // make sure the internally created ExecutorService is the one passed - assertTrue(otherPool == ((HConnectionImplementation)con2).getCurrentBatchPool()); + assertTrue(otherPool == ((ConnectionImplementation)con2).getCurrentBatchPool()); String tableName = "testClusterConnection"; TEST_UTIL.createTable(tableName.getBytes(), FAM_NAM).close(); - HTable t = (HTable)con1.getTable(tableName, otherPool); + HTable t = (HTable)con1.getTable(TableName.valueOf(tableName), otherPool); // make sure passing a pool to the getTable does not trigger creation of an internal pool - assertNull("Internal Thread pool should be null", ((HConnectionImplementation)con1).getCurrentBatchPool()); + assertNull("Internal Thread pool should be null", ((ConnectionImplementation)con1).getCurrentBatchPool()); // table should use the pool passed assertTrue(otherPool == t.getPool()); t.close(); - t = 
(HTable)con2.getTable(tableName); + t = (HTable)con2.getTable(TableName.valueOf(tableName)); // table should use the connectin's internal pool assertTrue(otherPool == t.getPool()); t.close(); - t = (HTable)con2.getTable(Bytes.toBytes(tableName)); + t = (HTable)con2.getTable(TableName.valueOf(tableName)); // try other API too assertTrue(otherPool == t.getPool()); t.close(); @@ -184,15 +165,15 @@ public class TestHCM { assertTrue(otherPool == t.getPool()); t.close(); - t = (HTable)con1.getTable(tableName); - ExecutorService pool = ((HConnectionImplementation)con1).getCurrentBatchPool(); + t = (HTable)con1.getTable(TableName.valueOf(tableName)); + ExecutorService pool = ((ConnectionImplementation)con1).getCurrentBatchPool(); // make sure an internal pool was created assertNotNull("An internal Thread pool should have been created", pool); // and that the table is using it assertTrue(t.getPool() == pool); t.close(); - t = (HTable)con1.getTable(tableName); + t = (HTable)con1.getTable(TableName.valueOf(tableName)); // still using the *same* internal pool assertTrue(t.getPool() == pool); t.close(); @@ -223,10 +204,6 @@ public class TestHCM { // Fails too often! Needs work. HBASE-12558 @Ignore @Test(expected = RegionServerStoppedException.class) public void testClusterStatus() throws Exception { - if (!isJavaOk){ - // This test requires jdk 1.7+ - throw new RegionServerStoppedException("as expected by the test..."); - } TableName tn = TableName.valueOf("testClusterStatus"); @@ -244,7 +221,7 @@ public class TestHCM { getRegionStates().isRegionsInTransition()){ Thread.sleep(1); } - final HConnectionImplementation hci = (HConnectionImplementation)t.getConnection(); + final ConnectionImplementation hci = (ConnectionImplementation)t.getConnection(); while (t.getRegionLocation(rk).getPort() != sn.getPort()){ TEST_UTIL.getHBaseAdmin().move(t.getRegionLocation(rk).getRegionInfo(). getEncodedNameAsBytes(), Bytes.toBytes(sn.toString())); @@ -388,8 +365,8 @@ public class TestHCM { }); ServerName sn = table.getRegionLocation(ROW).getServerName(); - ConnectionManager.HConnectionImplementation conn = - (ConnectionManager.HConnectionImplementation) table.getConnection(); + ConnectionImplementation conn = + (ConnectionImplementation) table.getConnection(); RpcClient rpcClient = conn.getRpcClient(); LOG.info("Going to cancel connections. 
connection=" + conn.toString() + ", sn=" + sn); @@ -479,10 +456,6 @@ public class TestHCM { */ @Test public void testConnectionCut() throws Exception { - if (!isJavaOk){ - // This test requires jdk 1.7+ - return; - } TableName tableName = TableName.valueOf("HCM-testConnectionCut"); @@ -502,7 +475,7 @@ public class TestHCM { p.add(FAM_NAM, FAM_NAM, FAM_NAM); table.put(p); - final HConnectionImplementation hci = (HConnectionImplementation)table.getConnection(); + final ConnectionImplementation hci = (ConnectionImplementation)table.getConnection(); final HRegionLocation loc = table.getRegionLocation(FAM_NAM); Get get = new Get(FAM_NAM); @@ -537,7 +510,6 @@ public class TestHCM { } finally { syncBlockingFilter.set(true); t.join(); - HConnectionManager.getConnection(c2).close(); TEST_UTIL.getHBaseAdmin().setBalancerRunning(previousBalance, true); } @@ -570,28 +542,6 @@ public class TestHCM { } } - @Test - public void abortingHConnectionRemovesItselfFromHCM() throws Exception { - // Save off current HConnections - Map oldHBaseInstances = - new HashMap(); - oldHBaseInstances.putAll(ConnectionManager.CONNECTION_INSTANCES); - - ConnectionManager.CONNECTION_INSTANCES.clear(); - - try { - HConnection connection = HConnectionManager.getConnection(TEST_UTIL.getConfiguration()); - connection.abort("test abortingHConnectionRemovesItselfFromHCM", new Exception( - "test abortingHConnectionRemovesItselfFromHCM")); - Assert.assertNotSame(connection, - HConnectionManager.getConnection(TEST_UTIL.getConfiguration())); - } finally { - // Put original HConnections back - ConnectionManager.CONNECTION_INSTANCES.clear(); - ConnectionManager.CONNECTION_INSTANCES.putAll(oldHBaseInstances); - } - } - /** * Test that when we delete a location using the first row of a region * that we really delete it. @@ -609,8 +559,8 @@ public class TestHCM { Put put = new Put(ROW); put.add(FAM_NAM, ROW, ROW); table.put(put); - ConnectionManager.HConnectionImplementation conn = - (ConnectionManager.HConnectionImplementation)table.getConnection(); + ConnectionImplementation conn = + (ConnectionImplementation)table.getConnection(); assertNotNull(conn.getCachedLocation(TABLE_NAME, ROW)); @@ -712,7 +662,7 @@ public class TestHCM { Assert.assertArrayEquals(e.getRow(0).getRow(), ROW); // Check that we unserialized the exception as expected - Throwable cause = ConnectionManager.findException(e.getCause(0)); + Throwable cause = ConnectionImplementation.findException(e.getCause(0)); Assert.assertNotNull(cause); Assert.assertTrue(cause instanceof RegionMovedException); } @@ -810,8 +760,8 @@ public class TestHCM { Put put = new Put(ROW); put.add(FAM_NAM, ROW, ROW); table.put(put); - ConnectionManager.HConnectionImplementation conn = - (ConnectionManager.HConnectionImplementation)table.getConnection(); + ConnectionImplementation conn = + (ConnectionImplementation)table.getConnection(); HRegionLocation location = conn.getCachedLocation(TABLE_NAME2, ROW).getRegionLocation(); assertNotNull(location); @@ -848,88 +798,6 @@ public class TestHCM { table.close(); } - /** - * Make sure that {@link Configuration} instances that are essentially the - * same map to the same {@link HConnection} instance. 
- */ - @Test - public void testConnectionSameness() throws Exception { - Connection previousConnection = null; - for (int i = 0; i < 2; i++) { - // set random key to differentiate the connection from previous ones - Configuration configuration = TEST_UTIL.getConfiguration(); - configuration.set("some_key", String.valueOf(_randy.nextInt())); - LOG.info("The hash code of the current configuration is: " - + configuration.hashCode()); - Connection currentConnection = HConnectionManager - .getConnection(configuration); - if (previousConnection != null) { - assertTrue( - "Did not get the same connection even though its key didn't change", - previousConnection == currentConnection); - } - previousConnection = currentConnection; - // change the configuration, so that it is no longer reachable from the - // client's perspective. However, since its part of the LRU doubly linked - // list, it will eventually get thrown out, at which time it should also - // close the corresponding {@link HConnection}. - configuration.set("other_key", String.valueOf(_randy.nextInt())); - } - } - - /** - * Makes sure that there is no leaking of - * {@link ConnectionManager.HConnectionImplementation} in the {@link HConnectionManager} - * class. - * @deprecated Tests deprecated functionality. Remove in 1.0. - */ - @Deprecated - @Test - public void testConnectionUniqueness() throws Exception { - int zkmaxconnections = TEST_UTIL.getConfiguration(). - getInt(HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS, - HConstants.DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS); - // Test up to a max that is < the maximum number of zk connections. If we - // go above zk connections, we just fall into cycle where we are failing - // to set up a session and test runs for a long time. - int maxConnections = Math.min(zkmaxconnections - 1, 20); - List connections = new ArrayList(maxConnections); - Connection previousConnection = null; - try { - for (int i = 0; i < maxConnections; i++) { - // set random key to differentiate the connection from previous ones - Configuration configuration = new Configuration(TEST_UTIL.getConfiguration()); - configuration.set("some_key", String.valueOf(_randy.nextInt())); - configuration.set(HConstants.HBASE_CLIENT_INSTANCE_ID, - String.valueOf(_randy.nextInt())); - LOG.info("The hash code of the current configuration is: " - + configuration.hashCode()); - HConnection currentConnection = - HConnectionManager.getConnection(configuration); - if (previousConnection != null) { - assertTrue("Got the same connection even though its key changed!", - previousConnection != currentConnection); - } - // change the configuration, so that it is no longer reachable from the - // client's perspective. However, since its part of the LRU doubly linked - // list, it will eventually get thrown out, at which time it should also - // close the corresponding {@link HConnection}. - configuration.set("other_key", String.valueOf(_randy.nextInt())); - - previousConnection = currentConnection; - LOG.info("The current HConnectionManager#HBASE_INSTANCES cache size is: " - + getHConnectionManagerCacheSize()); - Thread.sleep(50); - connections.add(currentConnection); - } - } finally { - for (Connection c: connections) { - // Clean up connections made so we don't interfere w/ subsequent tests. 
- HConnectionManager.deleteConnection(c.getConfiguration()); - } - } - } - @Test public void testClosing() throws Exception { Configuration configuration = @@ -937,38 +805,26 @@ public class TestHCM { configuration.set(HConstants.HBASE_CLIENT_INSTANCE_ID, String.valueOf(_randy.nextInt())); + // as connection caching is going away, now we're just testing + // that closed connection does actually get closed. + Connection c1 = ConnectionFactory.createConnection(configuration); - // We create two connections with the same key. Connection c2 = ConnectionFactory.createConnection(configuration); + // no caching, different connections + assertTrue(c1 != c2); - Connection c3 = HConnectionManager.getConnection(configuration); - Connection c4 = HConnectionManager.getConnection(configuration); - assertTrue(c3 == c4); - + // closing independently c1.close(); assertTrue(c1.isClosed()); assertFalse(c2.isClosed()); - assertFalse(c3.isClosed()); - c3.close(); - // still a reference left - assertFalse(c3.isClosed()); - c3.close(); - assertTrue(c3.isClosed()); - // c3 was removed from the cache - Connection c5 = HConnectionManager.getConnection(configuration); - assertTrue(c5 != c3); - - assertFalse(c2.isClosed()); c2.close(); assertTrue(c2.isClosed()); - c5.close(); - assertTrue(c5.isClosed()); } /** * Trivial test to verify that nobody messes with - * {@link HConnectionManager#createConnection(Configuration)} + * {@link ConnectionFactory#createConnection(Configuration)} */ @Test public void testCreateConnection() throws Exception { @@ -978,13 +834,8 @@ public class TestHCM { // created from the same configuration, yet they are different assertTrue(c1 != c2); assertTrue(c1.getConfiguration() == c2.getConfiguration()); - // make sure these were not cached - Connection c3 = HConnectionManager.getConnection(configuration); - assertTrue(c1 != c3); - assertTrue(c2 != c3); } - /** * This test checks that one can connect to the cluster with only the * ZooKeeper quorum set. 
Other stuff like master address will be read @@ -996,16 +847,16 @@ public class TestHCM { Configuration c = new Configuration(); c.set(HConstants.ZOOKEEPER_QUORUM, TEST_UTIL.getConfiguration().get(HConstants.ZOOKEEPER_QUORUM)); - c.set(HConstants.ZOOKEEPER_CLIENT_PORT , + c.set(HConstants.ZOOKEEPER_CLIENT_PORT, TEST_UTIL.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT)); // This should be enough to connect - HConnection conn = HConnectionManager.getConnection(c); - assertTrue( conn.isMasterRunning() ); + HConnection conn = (HConnection) ConnectionFactory.createConnection(c); + assertTrue(conn.isMasterRunning()); conn.close(); } - private int setNumTries(HConnectionImplementation hci, int newVal) throws Exception { + private int setNumTries(ConnectionImplementation hci, int newVal) throws Exception { Field numTries = hci.getClass().getDeclaredField("numTries"); numTries.setAccessible(true); Field modifiersField = Field.class.getDeclaredField("modifiers"); @@ -1021,8 +872,8 @@ public class TestHCM { public void testMulti() throws Exception { HTable table = TEST_UTIL.createMultiRegionTable(TABLE_NAME3, FAM_NAM); try { - ConnectionManager.HConnectionImplementation conn = - ( ConnectionManager.HConnectionImplementation)table.getConnection(); + ConnectionImplementation conn = + (ConnectionImplementation)table.getConnection(); // We're now going to move the region and check that it works for the client // First a new put to add the location in the cache @@ -1056,9 +907,9 @@ public class TestHCM { ServerName destServerName = destServer.getServerName(); //find another row in the cur server that is less than ROW_X - List regions = curServer.getOnlineRegions(TABLE_NAME3); + List regions = curServer.getOnlineRegions(TABLE_NAME3); byte[] otherRow = null; - for (HRegion region : regions) { + for (Region region : regions) { if (!region.getRegionInfo().getEncodedName().equals(toMove.getRegionInfo().getEncodedName()) && Bytes.BYTES_COMPARATOR.compare(region.getRegionInfo().getStartKey(), ROW_X) < 0) { otherRow = region.getRegionInfo().getStartKey(); @@ -1141,8 +992,8 @@ public class TestHCM { try { long timeBase = timeMachine.currentTime(); long largeAmountOfTime = ANY_PAUSE * 1000; - ConnectionManager.ServerErrorTracker tracker = - new ConnectionManager.ServerErrorTracker(largeAmountOfTime, 100); + ConnectionImplementation.ServerErrorTracker tracker = + new ConnectionImplementation.ServerErrorTracker(largeAmountOfTime, 100); // The default backoff is 0. assertEquals(0, tracker.calculateBackoffTime(location, ANY_PAUSE)); @@ -1194,86 +1045,7 @@ public class TestHCM { private static void assertEqualsWithJitter(long expected, long actual, long jitterBase) { assertTrue("Value not within jitter: " + expected + " vs " + actual, - Math.abs(actual - expected) <= (0.01f * jitterBase)); - } - - /** - * Tests that a destroyed connection does not have a live zookeeper. - * Below is timing based. We put up a connection to a table and then close the connection while - * having a background thread running that is forcing close of the connection to try and - * provoke a close catastrophe; we are hoping for a car crash so we can see if we are leaking - * zk connections. 
- * @throws Exception - */ - @Ignore ("Flakey test: See HBASE-8996")@Test - public void testDeleteForZKConnLeak() throws Exception { - TEST_UTIL.createTable(TABLE_NAME4, FAM_NAM); - final Configuration config = HBaseConfiguration.create(TEST_UTIL.getConfiguration()); - config.setInt("zookeeper.recovery.retry", 1); - config.setInt("zookeeper.recovery.retry.intervalmill", 1000); - config.setInt("hbase.rpc.timeout", 2000); - config.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1); - - ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 10, - 5, TimeUnit.SECONDS, - new SynchronousQueue(), - Threads.newDaemonThreadFactory("test-hcm-delete")); - - pool.submit(new Runnable() { - @Override - public void run() { - while (!Thread.interrupted()) { - try { - HConnection conn = HConnectionManager.getConnection(config); - LOG.info("Connection " + conn); - HConnectionManager.deleteStaleConnection(conn); - LOG.info("Connection closed " + conn); - // TODO: This sleep time should be less than the time that it takes to open and close - // a table. Ideally we would do a few runs first to measure. For now this is - // timing based; hopefully we hit the bad condition. - Threads.sleep(10); - } catch (Exception e) { - } - } - } - }); - - // Use connection multiple times. - for (int i = 0; i < 30; i++) { - Connection c1 = null; - try { - c1 = ConnectionManager.getConnectionInternal(config); - LOG.info("HTable connection " + i + " " + c1); - Table table = c1.getTable(TABLE_NAME4, pool); - table.close(); - LOG.info("HTable connection " + i + " closed " + c1); - } catch (Exception e) { - LOG.info("We actually want this to happen!!!! So we can see if we are leaking zk", e); - } finally { - if (c1 != null) { - if (c1.isClosed()) { - // cannot use getZooKeeper as method instantiates watcher if null - Field zkwField = c1.getClass().getDeclaredField("keepAliveZookeeper"); - zkwField.setAccessible(true); - Object watcher = zkwField.get(c1); - - if (watcher != null) { - if (((ZooKeeperWatcher)watcher).getRecoverableZooKeeper().getState().isAlive()) { - // non-synchronized access to watcher; sleep and check again in case zk connection - // hasn't been cleaned up yet. 
- Thread.sleep(1000); - if (((ZooKeeperWatcher) watcher).getRecoverableZooKeeper().getState().isAlive()) { - pool.shutdownNow(); - fail("Live zookeeper in closed connection"); - } - } - } - } - c1.close(); - } - } - } - pool.shutdownNow(); + Math.abs(actual - expected) <= (0.01f * jitterBase)); } @Test(timeout = 60000) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerFlushCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerFlushCache.java index 2898369078b..f6c73ca9bdb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerFlushCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerFlushCache.java @@ -19,15 +19,13 @@ */ package org.apache.hadoop.hbase.client; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; @@ -36,6 +34,8 @@ import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; +import static org.junit.Assert.assertTrue; + @Category({ LargeTests.class, ClientTests.class }) public class TestHTableMultiplexerFlushCache { final Log LOG = LogFactory.getLog(getClass()); @@ -64,21 +64,22 @@ public class TestHTableMultiplexerFlushCache { TEST_UTIL.shutdownMiniCluster(); } - private static void checkExistence(HTable htable, byte[] row, byte[] family, byte[] quality, - byte[] value) throws Exception { + private static void checkExistence(final HTable htable, final byte[] row, final byte[] family, + final byte[] quality, + final byte[] value) throws Exception { // verify that the Get returns the correct result - Result r; - Get get = new Get(row); - get.addColumn(family, quality); - int nbTry = 0; - do { - assertTrue("Fail to get from " + htable.getName() + " after " + nbTry + " tries", nbTry < 50); - nbTry++; - Thread.sleep(100); - r = htable.get(get); - } while (r == null || r.getValue(family, quality) == null); - assertEquals("value", Bytes.toStringBinary(value), - Bytes.toStringBinary(r.getValue(family, quality))); + TEST_UTIL.waitFor(30000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + Result r; + Get get = new Get(row); + get.addColumn(family, quality); + r = htable.get(get); + return r != null && r.getValue(family, quality) != null + && Bytes.toStringBinary(value).equals( + Bytes.toStringBinary(r.getValue(family, quality))); + } + }); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java deleted file mode 100644 index e195baf31ce..00000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaScanner.java +++ /dev/null @@ -1,243 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
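Note: the TestHTableMultiplexerFlushCache change above trades a hand-rolled sleep/retry loop for HBaseTestingUtility.waitFor with a Waiter.Predicate (the generic parameter has been stripped in the flattened text; Exception is assumed here). The polling idiom, with row, family, quality, and value as declared in that test:

    TEST_UTIL.waitFor(30000, new Waiter.Predicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        Get get = new Get(row);
        get.addColumn(family, quality);
        Result r = htable.get(get);
        return r != null && r.getValue(family, quality) != null
            && Bytes.toStringBinary(value).equals(Bytes.toStringBinary(r.getValue(family, quality)));
      }
    });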
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.client; - -import static org.mockito.Matchers.anyObject; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.reset; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - -import java.math.BigDecimal; -import java.util.List; -import java.util.NavigableMap; -import java.util.Random; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.MetaTableAccessor; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.testclassification.ClientTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.StoppableImplementation; -import org.apache.hadoop.hbase.util.Threads; -import org.apache.hadoop.util.StringUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Test; -import org.junit.experimental.categories.Category; - -@Category({MediumTests.class, ClientTests.class}) -public class TestMetaScanner { - final Log LOG = LogFactory.getLog(getClass()); - private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private Connection connection; - - public void setUp() throws Exception { - TEST_UTIL.startMiniCluster(1); - this.connection = TEST_UTIL.getConnection(); - } - - @After - public void tearDown() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - @Test - public void testMetaScanner() throws Exception { - LOG.info("Starting testMetaScanner"); - - setUp(); - final TableName TABLENAME = TableName.valueOf("testMetaScanner"); - final byte[] FAMILY = Bytes.toBytes("family"); - final byte[][] SPLIT_KEYS = - new byte[][] { Bytes.toBytes("region_a"), Bytes.toBytes("region_b") }; - - TEST_UTIL.createTable(TABLENAME, FAMILY, SPLIT_KEYS); - HTable table = (HTable) connection.getTable(TABLENAME); - // Make sure all the regions are deployed - TEST_UTIL.countRows(table); - - MetaScanner.MetaScannerVisitor visitor = - mock(MetaScanner.MetaScannerVisitor.class); - doReturn(true).when(visitor).processRow((Result)anyObject()); - - // Scanning the entire table should give us three rows - MetaScanner.metaScan(connection, visitor, TABLENAME); - verify(visitor, times(3)).processRow((Result)anyObject()); - - // Scanning the table with a specified empty start row should also - // give us three hbase:meta rows - reset(visitor); - doReturn(true).when(visitor).processRow((Result)anyObject()); - MetaScanner.metaScan(connection, visitor, TABLENAME, HConstants.EMPTY_BYTE_ARRAY, 1000); - verify(visitor, times(3)).processRow((Result)anyObject()); - - // Scanning the table starting in the middle should give 
us two rows: - // region_a and region_b - reset(visitor); - doReturn(true).when(visitor).processRow((Result)anyObject()); - MetaScanner.metaScan(connection, visitor, TABLENAME, Bytes.toBytes("region_ac"), 1000); - verify(visitor, times(2)).processRow((Result)anyObject()); - - // Scanning with a limit of 1 should only give us one row - reset(visitor); - doReturn(true).when(visitor).processRow((Result) anyObject()); - MetaScanner.metaScan(connection, visitor, TABLENAME, Bytes.toBytes("region_ac"), 1); - verify(visitor, times(1)).processRow((Result) anyObject()); - table.close(); - } - - @Test - public void testConcurrentMetaScannerAndCatalogJanitor() throws Throwable { - /* TEST PLAN: start with only one region in a table. Have a splitter - * thread and metascanner threads that continously scan the meta table for regions. - * CatalogJanitor from master will run frequently to clean things up - */ - TEST_UTIL.getConfiguration().setLong("hbase.catalogjanitor.interval", 500); - setUp(); - - final long runtime = 30 * 1000; //30 sec - LOG.info("Starting testConcurrentMetaScannerAndCatalogJanitor"); - final TableName TABLENAME = - TableName.valueOf("testConcurrentMetaScannerAndCatalogJanitor"); - final byte[] FAMILY = Bytes.toBytes("family"); - TEST_UTIL.createTable(TABLENAME, FAMILY); - - class RegionMetaSplitter extends StoppableImplementation implements Runnable { - Random random = new Random(); - Throwable ex = null; - @Override - public void run() { - while (!isStopped()) { - try { - List regions = MetaScanner.listAllRegions(TEST_UTIL.getConfiguration(), - connection, false); - - //select a random region - HRegionInfo parent = regions.get(random.nextInt(regions.size())); - if (parent == null || !TABLENAME.equals(parent.getTable())) { - continue; - } - - long startKey = 0, endKey = Long.MAX_VALUE; - byte[] start = parent.getStartKey(); - byte[] end = parent.getEndKey(); - if (!Bytes.equals(HConstants.EMPTY_START_ROW, parent.getStartKey())) { - startKey = Bytes.toLong(parent.getStartKey()); - } - if (!Bytes.equals(HConstants.EMPTY_END_ROW, parent.getEndKey())) { - endKey = Bytes.toLong(parent.getEndKey()); - } - if (startKey == endKey) { - continue; - } - - long midKey = BigDecimal.valueOf(startKey).add(BigDecimal.valueOf(endKey)) - .divideToIntegralValue(BigDecimal.valueOf(2)).longValue(); - - HRegionInfo splita = new HRegionInfo(TABLENAME, - start, - Bytes.toBytes(midKey)); - HRegionInfo splitb = new HRegionInfo(TABLENAME, - Bytes.toBytes(midKey), - end); - - MetaTableAccessor.splitRegion(connection, - parent, splita, splitb, ServerName.valueOf("fooserver", 1, 0), 1); - - Threads.sleep(random.nextInt(200)); - } catch (Throwable e) { - ex = e; - Assert.fail(StringUtils.stringifyException(e)); - } - } - } - void rethrowExceptionIfAny() throws Throwable { - if (ex != null) { throw ex; } - } - } - - class MetaScannerVerifier extends StoppableImplementation implements Runnable { - Random random = new Random(); - Throwable ex = null; - @Override - public void run() { - while(!isStopped()) { - try { - NavigableMap regions = - MetaScanner.allTableRegions(connection, TABLENAME); - - LOG.info("-------"); - byte[] lastEndKey = HConstants.EMPTY_START_ROW; - for (HRegionInfo hri: regions.navigableKeySet()) { - long startKey = 0, endKey = Long.MAX_VALUE; - if (!Bytes.equals(HConstants.EMPTY_START_ROW, hri.getStartKey())) { - startKey = Bytes.toLong(hri.getStartKey()); - } - if (!Bytes.equals(HConstants.EMPTY_END_ROW, hri.getEndKey())) { - endKey = Bytes.toLong(hri.getEndKey()); - } - LOG.info("start:" + 
startKey + " end:" + endKey + " hri:" + hri); - Assert.assertTrue("lastEndKey=" + Bytes.toString(lastEndKey) + ", startKey=" + - Bytes.toString(hri.getStartKey()), Bytes.equals(lastEndKey, hri.getStartKey())); - lastEndKey = hri.getEndKey(); - } - Assert.assertTrue(Bytes.equals(lastEndKey, HConstants.EMPTY_END_ROW)); - LOG.info("-------"); - Threads.sleep(10 + random.nextInt(50)); - } catch (Throwable e) { - ex = e; - Assert.fail(StringUtils.stringifyException(e)); - } - } - } - void rethrowExceptionIfAny() throws Throwable { - if (ex != null) { throw ex; } - } - } - - RegionMetaSplitter regionMetaSplitter = new RegionMetaSplitter(); - MetaScannerVerifier metaScannerVerifier = new MetaScannerVerifier(); - - Thread regionMetaSplitterThread = new Thread(regionMetaSplitter); - Thread metaScannerVerifierThread = new Thread(metaScannerVerifier); - - regionMetaSplitterThread.start(); - metaScannerVerifierThread.start(); - - Threads.sleep(runtime); - - regionMetaSplitter.stop("test finished"); - metaScannerVerifier.stop("test finished"); - - regionMetaSplitterThread.join(); - metaScannerVerifierThread.join(); - - regionMetaSplitter.rethrowExceptionIfAny(); - metaScannerVerifier.rethrowExceptionIfAny(); - } - -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java index 8e60353c387..76516862bca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java @@ -19,10 +19,12 @@ package org.apache.hadoop.hbase.client; import javax.annotation.Nullable; + import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.List; +import java.util.concurrent.ExecutorService; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -73,7 +75,7 @@ public class TestMetaWithReplicas { TEST_UTIL.getConfiguration().setInt(HConstants.META_REPLICAS_NUM, 3); TEST_UTIL.getConfiguration().setInt( StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 1000); - TEST_UTIL.startMiniCluster(3); + TEST_UTIL.startMiniCluster(3); // disable the balancer LoadBalancerTracker l = new LoadBalancerTracker(TEST_UTIL.getZooKeeperWatcher(), new Abortable() { @@ -84,7 +86,7 @@ public class TestMetaWithReplicas { } @Override public void abort(String why, Throwable e) { - aborted = true; + aborted = true; } }); l.setBalancerOn(false); @@ -106,7 +108,7 @@ public class TestMetaWithReplicas { assertTrue(TEST_UTIL.getHBaseAdmin().getTableDescriptor(TableName.META_TABLE_NAME) .getRegionReplication() == 3); } - + @Test public void testZookeeperNodesForReplicas() throws Exception { // Checks all the znodes exist when meta's replicas are enabled @@ -220,6 +222,23 @@ public class TestMetaWithReplicas { assertTrue(Arrays.equals(r.getRow(), row)); } + @Test + public void testMetaLookupThreadPoolCreated() throws Exception { + byte[] TABLE = Bytes.toBytes("testMetaLookupThreadPoolCreated"); + byte[][] FAMILIES = new byte[][] { Bytes.toBytes("foo") }; + if (TEST_UTIL.getHBaseAdmin().tableExists(TABLE)) { + TEST_UTIL.getHBaseAdmin().disableTable(TABLE); + TEST_UTIL.getHBaseAdmin().deleteTable(TABLE); + } + Table htable = TEST_UTIL.createTable(TABLE, FAMILIES, TEST_UTIL.getConfiguration()); + byte[] row = "test".getBytes(); + ConnectionImplementation c = ((ConnectionImplementation)((HTable)htable).connection); + // check that 
metalookup pool would get created + c.relocateRegion(TABLE, row); + ExecutorService ex = c.getCurrentMetaLookupPool(); + assert(ex != null); + } + @Test public void testChangingReplicaCount() throws Exception { // tests changing the replica count across master restarts @@ -390,7 +409,9 @@ public class TestMetaWithReplicas { public void testShutdownOfReplicaHolder() throws Exception { // checks that the when the server holding meta replica is shut down, the meta replica // can be recovered - RegionLocations rl = ConnectionManager.getConnectionInternal(TEST_UTIL.getConfiguration()). + ClusterConnection conn = (ClusterConnection) + ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + RegionLocations rl = conn. locateRegion(TableName.META_TABLE_NAME, Bytes.toBytes(""), false, true); HRegionLocation hrl = rl.getRegionLocation(1); ServerName oldServer = hrl.getServerName(); @@ -399,12 +420,12 @@ public class TestMetaWithReplicas { do { LOG.debug("Waiting for the replica " + hrl.getRegionInfo() + " to come up"); Thread.sleep(30000); //wait for the detection/recovery - rl = ConnectionManager.getConnectionInternal(TEST_UTIL.getConfiguration()). - locateRegion(TableName.META_TABLE_NAME, Bytes.toBytes(""), false, true); + rl = conn.locateRegion(TableName.META_TABLE_NAME, Bytes.toBytes(""), false, true); hrl = rl.getRegionLocation(1); i++; } while ((hrl == null || hrl.getServerName().equals(oldServer)) && i < 3); assertTrue(i != 3); + conn.close(); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClient.java index 27d53ba27ca..60fc0ff6e0c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobCloneSnapshotFromClient.java @@ -103,7 +103,8 @@ public class TestMobCloneSnapshotFromClient { // take an empty snapshot admin.snapshot(emptySnapshot, tableName); - HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName); + Connection c = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + Table table = c.getTable(tableName); try { // enable table and insert data admin.enableTable(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClient.java index 0bb498d734a..6fc2d28f3ae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobRestoreSnapshotFromClient.java @@ -115,7 +115,8 @@ public class TestMobRestoreSnapshotFromClient { // take an empty snapshot admin.snapshot(emptySnapshot, tableName); - HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName); + Table table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()) + .getTable(tableName); // enable table and insert data admin.enableTable(tableName); SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 500, FAMILY); @@ -179,7 +180,8 @@ public class TestMobRestoreSnapshotFromClient { public void testRestoreSchemaChange() throws Exception { byte[] TEST_FAMILY2 = Bytes.toBytes("cf2"); - HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName); + Table table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()) + .getTable(tableName); // 
Add one column family and put some data in it admin.disableTable(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java index 612b98a6543..a2cd51c1245 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotCloneIndependence.java @@ -196,7 +196,7 @@ public class TestMobSnapshotCloneIndependence { final TableName localTableName = TableName.valueOf(STRING_TABLE_NAME + startTime); - HTable original = MobSnapshotTestingUtils.createMobTable(UTIL, localTableName, TEST_FAM); + Table original = MobSnapshotTestingUtils.createMobTable(UTIL, localTableName, TEST_FAM); try { SnapshotTestingUtils.loadData(UTIL, localTableName, 500, TEST_FAM); @@ -215,7 +215,8 @@ public class TestMobSnapshotCloneIndependence { TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName); admin.cloneSnapshot(snapshotName, cloneTableName); - HTable clonedTable = new HTable(UTIL.getConfiguration(), cloneTableName); + Table clonedTable = ConnectionFactory.createConnection(UTIL.getConfiguration()) + .getTable(cloneTableName); try { final int clonedTableRowCount = MobSnapshotTestingUtils.countMobRows(clonedTable); @@ -230,7 +231,6 @@ public class TestMobSnapshotCloneIndependence { Put p = new Put(Bytes.toBytes(rowKey)); p.add(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString")); original.put(p); - original.flushCommits(); // Verify that it is not present in the original table Assert.assertEquals("The row count of the original table was not modified by the put", @@ -240,9 +240,8 @@ public class TestMobSnapshotCloneIndependence { clonedTableRowCount, MobSnapshotTestingUtils.countMobRows(clonedTable)); p = new Put(Bytes.toBytes(rowKey)); - p.add(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString")); + p.addColumn(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString")); clonedTable.put(p); - clonedTable.flushCommits(); // Verify that the new family is not in the restored table's description Assert.assertEquals( @@ -273,7 +272,7 @@ public class TestMobSnapshotCloneIndependence { final long startTime = System.currentTimeMillis(); final TableName localTableName = TableName.valueOf(STRING_TABLE_NAME + startTime); - HTable original = MobSnapshotTestingUtils.createMobTable(UTIL, localTableName, TEST_FAM); + Table original = MobSnapshotTestingUtils.createMobTable(UTIL, localTableName, TEST_FAM); SnapshotTestingUtils.loadData(UTIL, localTableName, 500, TEST_FAM); final int loadedTableCount = MobSnapshotTestingUtils.countMobRows(original); System.out.println("Original table has: " + loadedTableCount + " rows"); @@ -295,7 +294,7 @@ public class TestMobSnapshotCloneIndependence { admin.cloneSnapshot(snapshotName, cloneTableName); // Verify that region information is the same pre-split - original.clearRegionCache(); + ((HTable)original).clearRegionCache(); List originalTableHRegions = admin.getTableRegions(localTableName); final int originalRegionCount = originalTableHRegions.size(); @@ -306,7 +305,7 @@ public class TestMobSnapshotCloneIndependence { // Split a region on the parent table admin.splitRegion(originalTableHRegions.get(0).getRegionName()); - waitOnSplit(original, originalRegionCount); + waitOnSplit((HTable)original, originalRegionCount); // Verify that the cloned table 
region is not split final int cloneTableRegionCount2 = admin.getTableRegions(cloneTableName).size(); @@ -329,7 +328,7 @@ public class TestMobSnapshotCloneIndependence { final long startTime = System.currentTimeMillis(); final TableName localTableName = TableName.valueOf(STRING_TABLE_NAME + startTime); - HTable original = MobSnapshotTestingUtils.createMobTable(UTIL, localTableName, TEST_FAM); + Table original = MobSnapshotTestingUtils.createMobTable(UTIL, localTableName, TEST_FAM); SnapshotTestingUtils.loadData(UTIL, localTableName, 500, TEST_FAM); final String snapshotNameAsString = "snapshot_" + localTableName; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java index 5bf5a30a694..b4c3aca04f0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java @@ -149,7 +149,7 @@ public class TestMobSnapshotFromClient { SnapshotTestingUtils.assertNoSnapshots(admin); // put some stuff in the table - HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME); + Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME); UTIL.loadTable(table, TEST_FAM); table.close(); @@ -185,7 +185,7 @@ public class TestMobSnapshotFromClient { SnapshotTestingUtils.assertNoSnapshots(admin); // put some stuff in the table - HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME); + Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME); UTIL.loadTable(table, TEST_FAM, false); LOG.debug("FS state before disable:"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java index abea6997e3d..29588343011 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java @@ -567,7 +567,7 @@ public class TestMultiParallel { validateResult(result, QUALIFIER, Bytes.toBytes((numRequests / 2) + 1L)); table.close(); } finally { - ConnectionManager.injectNonceGeneratorForTesting((ClusterConnection)connection, oldCnm); + ConnectionImplementation.injectNonceGeneratorForTesting((ClusterConnection) connection, oldCnm); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java index efc8db21da4..bfc1230d87f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.TableNotFoundException; @@ -659,25 +660,34 @@ public class TestReplicasClient { private void runMultipleScansOfOneType(boolean reversed, boolean small) throws Exception { openRegion(hriSecondary); int NUMROWS = 100; + int NUMCOLS = 10; try { for (int i = 0; i < 
NUMROWS; i++) { byte[] b1 = Bytes.toBytes("testUseRegionWithReplica" + i); - Put p = new Put(b1); - p.add(f, b1, b1); - table.put(p); + for (int col = 0; col < NUMCOLS; col++) { + Put p = new Put(b1); + String qualifier = "qualifer" + col; + KeyValue kv = new KeyValue(b1, f, qualifier.getBytes()); + p.add(kv); + table.put(p); + } } LOG.debug("PUT done"); int caching = 20; + long maxResultSize = Long.MAX_VALUE; + byte[] start; if (reversed) start = Bytes.toBytes("testUseRegionWithReplica" + (NUMROWS - 1)); else start = Bytes.toBytes("testUseRegionWithReplica" + 0); - scanWithReplicas(reversed, small, Consistency.TIMELINE, caching, start, NUMROWS, false, false); + scanWithReplicas(reversed, small, Consistency.TIMELINE, caching, maxResultSize, + start, NUMROWS, NUMCOLS, false, false); - //Even if we were to slow the server down, unless we ask for stale - //we won't get it + // Even if we were to slow the server down, unless we ask for stale + // we won't get it SlowMeCopro.sleepTime.set(5000); - scanWithReplicas(reversed, small, Consistency.STRONG, caching, start, NUMROWS, false, false); + scanWithReplicas(reversed, small, Consistency.STRONG, caching, maxResultSize, start, NUMROWS, + NUMCOLS, false, false); SlowMeCopro.sleepTime.set(0); flushRegion(hriPrimary); @@ -686,13 +696,32 @@ public class TestReplicasClient { //Now set the flag to get a response even if stale SlowMeCopro.sleepTime.set(5000); - scanWithReplicas(reversed, small, Consistency.TIMELINE, caching, start, NUMROWS, true, false); + scanWithReplicas(reversed, small, Consistency.TIMELINE, caching, maxResultSize, + start, NUMROWS, NUMCOLS, true, false); SlowMeCopro.sleepTime.set(0); // now make some 'next' calls slow SlowMeCopro.slowDownNext.set(true); SlowMeCopro.countOfNext.set(0); - scanWithReplicas(reversed, small, Consistency.TIMELINE, caching, start, NUMROWS, true, true); + scanWithReplicas(reversed, small, Consistency.TIMELINE, caching, maxResultSize, start, + NUMROWS, NUMCOLS, true, true); + SlowMeCopro.slowDownNext.set(false); + SlowMeCopro.countOfNext.set(0); + + // Make sure we do not get stale data.. + SlowMeCopro.sleepTime.set(5000); + scanWithReplicas(reversed, small, Consistency.STRONG, caching, maxResultSize, + start, NUMROWS, NUMCOLS, false, false); + SlowMeCopro.sleepTime.set(0); + + // While the next calls are slow, set maxResultSize to 1 so that some partial results will be + // returned from the server before the replica switch occurs. 
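/*
 * The TestReplicasClient hunks above drive TIMELINE-consistency scans against region replicas,
 * shrink maxResultSize to force partial results, and then inspect Result.isStale() to see
 * whether a secondary replica answered. A self-contained sketch of that client-side pattern,
 * with a hypothetical table name and start row, might look like the following; it only
 * illustrates the API the test exercises.
 */
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TimelineScanSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("exampleTable"))) {
      Scan scan = new Scan(Bytes.toBytes("row-0"));
      scan.setConsistency(Consistency.TIMELINE); // allow reads to be served by replicas
      scan.setCaching(20);
      scan.setMaxResultSize(1);                  // tiny size limit, as in the test
      int rows = 0;
      int stale = 0;
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result r : scanner) {
          rows++;
          if (r.isStale()) {
            stale++;                             // data came from a secondary replica
          }
        }
      }
      System.out.println(rows + " rows scanned, " + stale + " marked stale");
    }
  }
}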
+ maxResultSize = 1; + SlowMeCopro.slowDownNext.set(true); + SlowMeCopro.countOfNext.set(0); + scanWithReplicas(reversed, small, Consistency.TIMELINE, caching, maxResultSize, start, + NUMROWS, NUMCOLS, true, true); + maxResultSize = Long.MAX_VALUE; SlowMeCopro.slowDownNext.set(false); SlowMeCopro.countOfNext.set(0); } finally { @@ -710,33 +739,60 @@ public class TestReplicasClient { } private void scanWithReplicas(boolean reversed, boolean small, Consistency consistency, - int caching, byte[] startRow, int numRows, boolean staleExpected, boolean slowNext) + int caching, long maxResultSize, byte[] startRow, int numRows, int numCols, + boolean staleExpected, boolean slowNext) throws Exception { Scan scan = new Scan(startRow); scan.setCaching(caching); + scan.setMaxResultSize(maxResultSize); scan.setReversed(reversed); scan.setSmall(small); scan.setConsistency(consistency); ResultScanner scanner = table.getScanner(scan); Iterator iter = scanner.iterator(); + + // Maps of row keys that we have seen so far HashMap map = new HashMap(); - int count = 0; + + // Tracked metrics + int rowCount = 0; + int cellCount = 0; int countOfStale = 0; + while (iter.hasNext()) { - count++; + rowCount++; Result r = iter.next(); - if (map.containsKey(new String(r.getRow()))) { + String row = new String(r.getRow()); + + if (map.containsKey(row)) { throw new Exception("Unexpected scan result. Repeated row " + Bytes.toString(r.getRow())); } - map.put(new String(r.getRow()), true); + + map.put(row, true); + + for (Cell cell : r.rawCells()) { + cellCount++; + } + if (!slowNext) Assert.assertTrue(r.isStale() == staleExpected); if (r.isStale()) countOfStale++; } - LOG.debug("Count of rows " + count + " num rows expected " + numRows); - Assert.assertTrue(count == numRows); + Assert.assertTrue("Count of rows " + rowCount + " num rows expected " + numRows, + rowCount == numRows); + Assert.assertTrue("Count of cells: " + cellCount + " cells expected: " + numRows * numCols, + cellCount == (numRows * numCols)); + if (slowNext) { LOG.debug("Count of Stale " + countOfStale); - Assert.assertTrue(countOfStale > 1 && countOfStale < numRows); + Assert.assertTrue(countOfStale > 1); + + // If the scan was configured in such a way that a full row was NOT retrieved before the + // replica switch occurred, then it is possible that all rows were stale + if (maxResultSize != Long.MAX_VALUE) { + Assert.assertTrue(countOfStale <= numRows); + } else { + Assert.assertTrue(countOfStale < numRows); + } } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java index fd4b01af47d..aa25dd3493b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResult.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -229,6 +230,32 @@ public class TestResult extends TestCase { } } + /** + * Verifies that one can't modify instance of EMPTY_RESULT. 
+ */ + public void testEmptyResultIsReadonly() { + Result emptyResult = Result.EMPTY_RESULT; + Result otherResult = new Result(); + + try { + emptyResult.copyFrom(otherResult); + fail("UnsupportedOperationException should have been thrown!"); + } catch (UnsupportedOperationException ex) { + LOG.debug("As expected: " + ex.getMessage()); + } + try { + emptyResult.addResults(ClientProtos.RegionLoadStats.getDefaultInstance()); + fail("UnsupportedOperationException should have been thrown!"); + } catch (UnsupportedOperationException ex) { + LOG.debug("As expected: " + ex.getMessage()); + } + try { + emptyResult.setExists(true); + fail("UnsupportedOperationException should have been thrown!"); + } catch (UnsupportedOperationException ex) { + LOG.debug("As expected: " + ex.getMessage()); + } + } /** * Microbenchmark that compares {@link Result#getValue} and {@link Result#loadValue} performance. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultSizeEstimation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultSizeEstimation.java new file mode 100644 index 00000000000..1f3a95bba62 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestResultSizeEstimation.java @@ -0,0 +1,127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Tag; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; + +@Category(LargeTests.class) +public class TestResultSizeEstimation { + + final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + final static int TAG_DATA_SIZE = 2048; + final static int SCANNER_DATA_LIMIT = TAG_DATA_SIZE + 256; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + // Need HFileV3 + conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS); + // effectively limit max result size to one entry if it has tags + conf.setLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, SCANNER_DATA_LIMIT); + conf.setBoolean(ScannerCallable.LOG_SCANNER_ACTIVITY, true); + TEST_UTIL.startMiniCluster(1); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testResultSizeEstimation() throws Exception { + byte [] ROW1 = Bytes.toBytes("testRow1"); + byte [] ROW2 = Bytes.toBytes("testRow2"); + byte [] FAMILY = Bytes.toBytes("testFamily"); + byte [] QUALIFIER = Bytes.toBytes("testQualifier"); + byte [] VALUE = Bytes.toBytes("testValue"); + + TableName TABLE = TableName.valueOf("testResultSizeEstimation"); + byte[][] FAMILIES = new byte[][] { FAMILY }; + Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); + HTable table = TEST_UTIL.createTable(TABLE, FAMILIES, conf); + Put p = new Put(ROW1); + p.add(new KeyValue(ROW1, FAMILY, QUALIFIER, Long.MAX_VALUE, VALUE)); + table.put(p); + p = new Put(ROW2); + p.add(new KeyValue(ROW2, FAMILY, QUALIFIER, Long.MAX_VALUE, VALUE)); + table.put(p); + + Scan s = new Scan(); + s.setMaxResultSize(SCANNER_DATA_LIMIT); + ResultScanner rs = table.getScanner(s); + int count = 0; + while(rs.next() != null) { + count++; + } + assertEquals("Result size estimation did not work properly", 2, count); + rs.close(); + table.close(); + } + + @Test + public void testResultSizeEstimationWithTags() throws Exception { + byte [] ROW1 = Bytes.toBytes("testRow1"); + byte [] ROW2 = Bytes.toBytes("testRow2"); + byte [] FAMILY = Bytes.toBytes("testFamily"); + byte [] QUALIFIER = Bytes.toBytes("testQualifier"); + byte [] VALUE = Bytes.toBytes("testValue"); + + TableName TABLE = TableName.valueOf("testResultSizeEstimationWithTags"); + byte[][] FAMILIES = new byte[][] { FAMILY }; + Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); + HTable table = TEST_UTIL.createTable(TABLE, FAMILIES, conf); + Put p = new Put(ROW1); + p.add(new KeyValue(ROW1, FAMILY, QUALIFIER, Long.MAX_VALUE, VALUE, + new Tag[] { new Tag((byte)1, new byte[TAG_DATA_SIZE]) } )); + table.put(p); + p = new Put(ROW2); + p.add(new KeyValue(ROW2, FAMILY, 
QUALIFIER, Long.MAX_VALUE, VALUE, + new Tag[] { new Tag((byte)1, new byte[TAG_DATA_SIZE]) } )); + table.put(p); + + Scan s = new Scan(); + s.setMaxResultSize(SCANNER_DATA_LIMIT); + ResultScanner rs = table.getScanner(s); + int count = 0; + while(rs.next() != null) { + count++; + } + assertEquals("Result size estimation did not work properly", 2, count); + rs.close(); + table.close(); + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java index a6c1cfef3e8..1e939335a04 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java @@ -24,8 +24,10 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTestConst; @@ -68,6 +70,8 @@ public class TestScannersFromClientSide { */ @BeforeClass public static void setUpBeforeClass() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + conf.setLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, 10 * 1024 * 1024); TEST_UTIL.startMiniCluster(3); } @@ -170,6 +174,85 @@ public class TestScannersFromClientSide { } + @Test + public void testSmallScan() throws Exception { + TableName TABLE = TableName.valueOf("testSmallScan"); + + int numRows = 10; + byte[][] ROWS = HTestConst.makeNAscii(ROW, numRows); + + int numQualifiers = 10; + byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, numQualifiers); + + Table ht = TEST_UTIL.createTable(TABLE, FAMILY); + + Put put; + List puts = new ArrayList(); + for (int row = 0; row < ROWS.length; row++) { + put = new Put(ROWS[row]); + for (int qual = 0; qual < QUALIFIERS.length; qual++) { + KeyValue kv = new KeyValue(ROWS[row], FAMILY, QUALIFIERS[qual], VALUE); + put.add(kv); + } + puts.add(put); + } + ht.put(puts); + + int expectedRows = numRows; + int expectedCols = numRows * numQualifiers; + + // Test normal and reversed + testSmallScan(ht, true, expectedRows, expectedCols); + testSmallScan(ht, false, expectedRows, expectedCols); + } + + /** + * Run through a variety of test configurations with a small scan + * @param table + * @param reversed + * @param rows + * @param columns + * @throws Exception + */ + public void testSmallScan(Table table, boolean reversed, int rows, int columns) throws Exception { + Scan baseScan = new Scan(); + baseScan.setReversed(reversed); + baseScan.setSmall(true); + + Scan scan = new Scan(baseScan); + verifyExpectedCounts(table, scan, rows, columns); + + scan = new Scan(baseScan); + scan.setMaxResultSize(1); + verifyExpectedCounts(table, scan, rows, columns); + + scan = new Scan(baseScan); + scan.setMaxResultSize(1); + scan.setCaching(Integer.MAX_VALUE); + verifyExpectedCounts(table, scan, rows, columns); + } + + private void verifyExpectedCounts(Table table, Scan scan, int expectedRowCount, + int expectedCellCount) throws Exception { + ResultScanner scanner = table.getScanner(scan); + + int rowCount = 0; + int cellCount = 0; + Result r = null; + while ((r = scanner.next()) != null) { + rowCount++; + for 
(Cell c : r.rawCells()) { + cellCount++; + } + } + + assertTrue("Expected row count: " + expectedRowCount + " Actual row count: " + rowCount, + expectedRowCount == rowCount); + assertTrue("Expected cell count: " + expectedCellCount + " Actual cell count: " + cellCount, + expectedCellCount == cellCount); + scanner.close(); + } + /** * Test from client side for get with maxResultPerCF set * diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java new file mode 100644 index 00000000000..a0b0f70b442 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSizeFailures.java @@ -0,0 +1,176 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; + +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Map.Entry; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Maps; + +@Category(LargeTests.class) +public class TestSizeFailures { + static final Log LOG = LogFactory.getLog(TestSizeFailures.class); + protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static byte [] FAMILY = Bytes.toBytes("testFamily"); + protected static int SLAVES = 1; + private static TableName TABLENAME; + private static final int NUM_ROWS = 1000 * 1000, NUM_COLS = 10; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Uncomment the following lines if more verbosity is needed for + // debugging (see HBASE-12285 for details). 
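/*
 * The small-scan additions above (TestScannersFromClientSide) run scans with
 * Scan.setSmall(true) and deliberately tiny maxResultSize values, then assert that row and
 * cell counts still match. The sketch below shows that verification loop as a standalone
 * helper; the class name and the IllegalStateException used for reporting are illustrative
 * assumptions rather than code from the patch.
 */
import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

final class SmallScanCountSketch {
  static void verifyCounts(Table table, boolean reversed, int expectedRows, int expectedCells)
      throws IOException {
    Scan scan = new Scan();
    scan.setSmall(true);        // use the small-scan code path
    scan.setReversed(reversed);
    scan.setMaxResultSize(1);   // tiny limit; should not change what the scan returns
    int rows = 0;
    int cells = 0;
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        rows++;
        cells += r.rawCells().length;
      }
    }
    if (rows != expectedRows || cells != expectedCells) {
      throw new IllegalStateException("saw " + rows + " rows / " + cells + " cells, expected "
          + expectedRows + " / " + expectedCells);
    }
  }
}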
+ //((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL); + //((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL); + //((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL); + Configuration conf = TEST_UTIL.getConfiguration(); + conf.setBoolean("hbase.table.sanity.checks", true); // ignore sanity checks in the server + TEST_UTIL.startMiniCluster(SLAVES); + + // Write a bunch of data + TABLENAME = TableName.valueOf("testSizeFailures"); + List qualifiers = new ArrayList<>(); + for (int i = 1; i <= 10; i++) { + qualifiers.add(Bytes.toBytes(Integer.toString(i))); + } + + HColumnDescriptor hcd = new HColumnDescriptor(FAMILY); + HTableDescriptor desc = new HTableDescriptor(TABLENAME); + desc.addFamily(hcd); + byte[][] splits = new byte[9][2]; + for (int i = 1; i < 10; i++) { + int split = 48 + i; + splits[i - 1][0] = (byte) (split >>> 8); + splits[i - 1][0] = (byte) (split); + } + TEST_UTIL.getHBaseAdmin().createTable(desc, splits); + Connection conn = TEST_UTIL.getConnection(); + + try (Table table = conn.getTable(TABLENAME)) { + List puts = new LinkedList<>(); + for (int i = 0; i < NUM_ROWS; i++) { + Put p = new Put(Bytes.toBytes(Integer.toString(i))); + for (int j = 0; j < NUM_COLS; j++) { + byte[] value = new byte[50]; + Bytes.random(value); + p.addColumn(FAMILY, Bytes.toBytes(Integer.toString(j)), value); + } + puts.add(p); + + if (puts.size() == 1000) { + table.batch(puts, new Object[1000]); + puts.clear(); + } + } + + if (puts.size() > 0) { + table.batch(puts, new Object[puts.size()]); + } + } + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + /** + * Basic client side validation of HBASE-13262 + */ + @Test + public void testScannerSeesAllRecords() throws Exception { + Connection conn = TEST_UTIL.getConnection(); + try (Table table = conn.getTable(TABLENAME)) { + Scan s = new Scan(); + s.addFamily(FAMILY); + s.setMaxResultSize(-1); + s.setBatch(-1); + s.setCaching(500); + Entry entry = sumTable(table.getScanner(s)); + long rowsObserved = entry.getKey(); + long entriesObserved = entry.getValue(); + + // Verify that we see 1M rows and 10M cells + assertEquals(NUM_ROWS, rowsObserved); + assertEquals(NUM_ROWS * NUM_COLS, entriesObserved); + } + } + + /** + * Basic client side validation of HBASE-13262 + */ + @Test + public void testSmallScannerSeesAllRecords() throws Exception { + Connection conn = TEST_UTIL.getConnection(); + try (Table table = conn.getTable(TABLENAME)) { + Scan s = new Scan(); + s.setSmall(true); + s.addFamily(FAMILY); + s.setMaxResultSize(-1); + s.setBatch(-1); + s.setCaching(500); + Entry entry = sumTable(table.getScanner(s)); + long rowsObserved = entry.getKey(); + long entriesObserved = entry.getValue(); + + // Verify that we see 1M rows and 10M cells + assertEquals(NUM_ROWS, rowsObserved); + assertEquals(NUM_ROWS * NUM_COLS, entriesObserved); + } + } + + /** + * Count the number of rows and the number of entries from a scanner + * + * @param scanner + * The Scanner + * @return An entry where the first item is rows observed and the second is entries observed. 
+ */ + private Entry sumTable(ResultScanner scanner) { + long rowsObserved = 0l; + long entriesObserved = 0l; + + // Read all the records in the table + for (Result result : scanner) { + rowsObserved++; + while (result.advance()) { + entriesObserved++; + } + } + return Maps.immutableEntry(rowsObserved,entriesObserved); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java new file mode 100644 index 00000000000..b5899b814f1 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdminWithClusters.java @@ -0,0 +1,163 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. + */ +package org.apache.hadoop.hbase.client.replication; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.replication.TestReplicationBase; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Unit testing of ReplicationAdmin with clusters + */ +@Category({ MediumTests.class, ClientTests.class }) +public class TestReplicationAdminWithClusters extends TestReplicationBase { + + static Connection connection1; + static Connection connection2; + static Admin admin1; + static Admin admin2; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TestReplicationBase.setUpBeforeClass(); + connection1 = ConnectionFactory.createConnection(conf1); + connection2 = ConnectionFactory.createConnection(conf2); + admin1 = connection1.getAdmin(); + admin2 = connection2.getAdmin(); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + admin1.close(); + admin2.close(); + connection1.close(); + connection2.close(); + TestReplicationBase.tearDownAfterClass(); + } + + @Test(timeout = 300000) + public void testEnableReplicationWhenSlaveClusterDoesntHaveTable() throws Exception { + admin2.disableTable(tableName); + admin2.deleteTable(tableName); + 
assertFalse(admin2.tableExists(tableName)); + ReplicationAdmin adminExt = new ReplicationAdmin(conf1); + adminExt.enableTableRep(tableName); + assertTrue(admin2.tableExists(tableName)); + } + + @Test(timeout = 300000) + public void testEnableReplicationWhenReplicationNotEnabled() throws Exception { + HTableDescriptor table = admin1.getTableDescriptor(tableName); + for (HColumnDescriptor fam : table.getColumnFamilies()) { + fam.setScope(HConstants.REPLICATION_SCOPE_LOCAL); + } + admin1.disableTable(tableName); + admin1.modifyTable(tableName, table); + admin1.enableTable(tableName); + + admin2.disableTable(tableName); + admin2.modifyTable(tableName, table); + admin2.enableTable(tableName); + + ReplicationAdmin adminExt = new ReplicationAdmin(conf1); + adminExt.enableTableRep(tableName); + table = admin1.getTableDescriptor(tableName); + for (HColumnDescriptor fam : table.getColumnFamilies()) { + assertEquals(fam.getScope(), HConstants.REPLICATION_SCOPE_GLOBAL); + } + } + + @Test(timeout = 300000) + public void testEnableReplicationWhenTableDescriptorIsNotSameInClusters() throws Exception { + HTableDescriptor table = admin2.getTableDescriptor(tableName); + HColumnDescriptor f = new HColumnDescriptor("newFamily"); + table.addFamily(f); + admin2.disableTable(tableName); + admin2.modifyTable(tableName, table); + admin2.enableTable(tableName); + + ReplicationAdmin adminExt = new ReplicationAdmin(conf1); + try { + adminExt.enableTableRep(tableName); + fail("Exception should be thrown if table descriptors in the clusters are not same."); + } catch (RuntimeException ignored) { + + } + admin1.disableTable(tableName); + admin1.modifyTable(tableName, table); + admin1.enableTable(tableName); + adminExt.enableTableRep(tableName); + table = admin1.getTableDescriptor(tableName); + for (HColumnDescriptor fam : table.getColumnFamilies()) { + assertEquals(fam.getScope(), HConstants.REPLICATION_SCOPE_GLOBAL); + } + } + + @Test(timeout = 300000) + public void testDisableAndEnableReplication() throws Exception { + ReplicationAdmin adminExt = new ReplicationAdmin(conf1); + adminExt.disableTableRep(tableName); + HTableDescriptor table = admin1.getTableDescriptor(tableName); + for (HColumnDescriptor fam : table.getColumnFamilies()) { + assertEquals(fam.getScope(), HConstants.REPLICATION_SCOPE_LOCAL); + } + table = admin2.getTableDescriptor(tableName); + for (HColumnDescriptor fam : table.getColumnFamilies()) { + assertEquals(fam.getScope(), HConstants.REPLICATION_SCOPE_LOCAL); + } + adminExt.enableTableRep(tableName); + table = admin1.getTableDescriptor(tableName); + for (HColumnDescriptor fam : table.getColumnFamilies()) { + assertEquals(fam.getScope(), HConstants.REPLICATION_SCOPE_GLOBAL); + } + } + + @Test(timeout = 300000, expected = TableNotFoundException.class) + public void testDisableReplicationForNonExistingTable() throws Exception { + ReplicationAdmin adminExt = new ReplicationAdmin(conf1); + adminExt.disableTableRep(TableName.valueOf("nonExistingTable")); + } + + @Test(timeout = 300000, expected = TableNotFoundException.class) + public void testEnableReplicationForNonExistingTable() throws Exception { + ReplicationAdmin adminExt = new ReplicationAdmin(conf1); + adminExt.enableTableRep(TableName.valueOf("nonExistingTable")); + } + + @Test(timeout = 300000, expected = IllegalArgumentException.class) + public void testDisableReplicationWhenTableNameAsNull() throws Exception { + ReplicationAdmin adminExt = new ReplicationAdmin(conf1); + adminExt.disableTableRep(null); + } + + @Test(timeout = 300000, expected 
= IllegalArgumentException.class) + public void testEnableReplicationWhenTableNameAsNull() throws Exception { + ReplicationAdmin adminExt = new ReplicationAdmin(conf1); + adminExt.enableTableRep(null); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java index 8e374707f23..8aa8da1d817 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.CoprocessorEnvironment; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos.ColumnAggregationService; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos.SumRequest; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java index f91b8a6399c..43159460f09 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointNullResponse.java @@ -33,8 +33,8 @@ import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationW import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationWithNullResponseProtos.SumRequest; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationWithNullResponseProtos.SumResponse; import org.apache.hadoop.hbase.protobuf.ResponseConverter; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.util.Bytes; import com.google.protobuf.RpcCallback; @@ -86,9 +86,9 @@ implements Coprocessor, CoprocessorService { int sumResult = 0; InternalScanner scanner = null; try { - HRegion region = this.env.getRegion(); + Region region = this.env.getRegion(); // for the last region in the table, return null to test null handling - if (Bytes.equals(region.getEndKey(), HConstants.EMPTY_END_ROW)) { + if (Bytes.equals(region.getRegionInfo().getEndKey(), HConstants.EMPTY_END_ROW)) { done.run(null); return; } @@ -122,6 +122,6 @@ implements Coprocessor, CoprocessorService { } done.run(SumResponse.newBuilder().setSum(sumResult).build()); LOG.info("Returning sum " + sumResult + " for region " + - Bytes.toStringBinary(env.getRegion().getRegionName())); + Bytes.toStringBinary(env.getRegion().getRegionInfo().getRegionName())); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java index a16fc19b7e5..54289ef1613 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java @@ -34,8 +34,8 @@ import 
org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationW import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationWithErrorsProtos.SumRequest; import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationWithErrorsProtos.SumResponse; import org.apache.hadoop.hbase.protobuf.ResponseConverter; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.util.Bytes; import com.google.protobuf.RpcCallback; @@ -87,9 +87,9 @@ implements Coprocessor, CoprocessorService { int sumResult = 0; InternalScanner scanner = null; try { - HRegion region = this.env.getRegion(); + Region region = this.env.getRegion(); // throw an exception for requests to the last region in the table, to test error handling - if (Bytes.equals(region.getEndKey(), HConstants.EMPTY_END_ROW)) { + if (Bytes.equals(region.getRegionInfo().getEndKey(), HConstants.EMPTY_END_ROW)) { throw new DoNotRetryIOException("An expected exception"); } scanner = region.getScanner(scan); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java index 7100ae7c4fd..f4981f17354 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/SimpleRegionObserver.java @@ -54,12 +54,12 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.io.hfile.CacheConfig; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.HRegion.Operation; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.KeyValueScanner; import org.apache.hadoop.hbase.regionserver.Leases; import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.Region.Operation; import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.ScanType; import org.apache.hadoop.hbase.regionserver.Store; @@ -151,8 +151,8 @@ public class SimpleRegionObserver extends BaseRegionObserver { // from external packages RegionCoprocessorEnvironment re = (RegionCoprocessorEnvironment)e; Leases leases = re.getRegionServerServices().getLeases(); - leases.createLease(re.getRegion().getRegionNameAsString(), 2000, null); - leases.cancelLease(re.getRegion().getRegionNameAsString()); + leases.createLease(re.getRegion().getRegionInfo().getRegionNameAsString(), 2000, null); + leases.cancelLease(re.getRegion().getRegionInfo().getRegionNameAsString()); } @Override @@ -229,7 +229,7 @@ public class SimpleRegionObserver extends BaseRegionObserver { } @Override - public void postSplit(ObserverContext c, HRegion l, HRegion r) { + public void postSplit(ObserverContext c, Region l, Region r) { ctPostSplit.incrementAndGet(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java index 140c3b9d6c2..6a5080bf3c1 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java @@ -20,11 +20,10 @@ package org.apache.hadoop.hbase.coprocessor; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.TestServerCustomProtocol; import org.apache.hadoop.hbase.testclassification.CoprocessorTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -163,12 +162,12 @@ public class TestClassLoading { // verify that the coprocessors were loaded boolean foundTableRegion=false; boolean found1 = true, found2 = true, found2_k1 = true, found2_k2 = true, found2_k3 = true; - Map> regionsActiveClassLoaders = - new HashMap>(); + Map> regionsActiveClassLoaders = + new HashMap>(); MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: + for (Region region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { - if (region.getRegionNameAsString().startsWith(tableName.getNameAsString())) { + if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) { foundTableRegion = true; CoprocessorEnvironment env; env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1); @@ -206,7 +205,7 @@ public class TestClassLoading { //check if region active classloaders are shared across all RS regions Set externalClassLoaders = new HashSet( CoprocessorClassLoader.getAllCached()); - for (Map.Entry> regionCP : regionsActiveClassLoaders.entrySet()) { + for (Map.Entry> regionCP : regionsActiveClassLoaders.entrySet()) { assertTrue("Some CP classloaders for region " + regionCP.getKey() + " are not cached." 
+ " ClassLoader Cache:" + externalClassLoaders + " Region ClassLoaders:" + regionCP.getValue(), @@ -235,9 +234,8 @@ public class TestClassLoading { // verify that the coprocessor was loaded boolean found = false; MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: - hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { - if (region.getRegionNameAsString().startsWith(cpName3)) { + for (Region region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName3)) { found = (region.getCoprocessorHost().findCoprocessor(cpName3) != null); } } @@ -261,9 +259,8 @@ public class TestClassLoading { // verify that the coprocessor was loaded correctly boolean found = false; MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: - hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { - if (region.getRegionNameAsString().startsWith(cpName4)) { + for (Region region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName4)) { Coprocessor cp = region.getCoprocessorHost().findCoprocessor(cpName4); if (cp != null) { found = true; @@ -333,9 +330,8 @@ public class TestClassLoading { found6_k4 = false; MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: - hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { - if (region.getRegionNameAsString().startsWith(tableName.getNameAsString())) { + for (Region region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) { found_1 = found_1 || (region.getCoprocessorHost().findCoprocessor(cpName1) != null); found_2 = found_2 || @@ -422,9 +418,8 @@ public class TestClassLoading { boolean found1 = false, found2 = false, found2_k1 = false, found2_k2 = false, found2_k3 = false; MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: - hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { - if (region.getRegionNameAsString().startsWith(tableName.getNameAsString())) { + for (Region region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) { CoprocessorEnvironment env; env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1); if (env != null) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java new file mode 100644 index 00000000000..fb2f20cfbc8 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorConfiguration.java @@ -0,0 +1,172 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.coprocessor; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.mockito.Mockito.*; +import static org.junit.Assert.*; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; +import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost; +import org.apache.hadoop.hbase.regionserver.RegionServerServices; +import org.apache.hadoop.hbase.testclassification.CoprocessorTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Tests for global coprocessor loading configuration + */ +@Category({CoprocessorTests.class, SmallTests.class}) +public class TestCoprocessorConfiguration { + + private static final Configuration CONF = HBaseConfiguration.create(); + static { + CONF.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + SystemCoprocessor.class.getName()); + CONF.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, + SystemCoprocessor.class.getName()); + CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, + SystemCoprocessor.class.getName()); + } + private static final TableName TABLENAME = TableName.valueOf("TestCoprocessorConfiguration"); + private static final HRegionInfo REGIONINFO = new HRegionInfo(TABLENAME); + private static final HTableDescriptor TABLEDESC = new HTableDescriptor(TABLENAME); + static { + try { + TABLEDESC.addCoprocessor(TableCoprocessor.class.getName()); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + // use atomic types in case coprocessor loading is ever multithreaded, also + // so we can mutate them even though they are declared final here + private static final AtomicBoolean systemCoprocessorLoaded = new AtomicBoolean(); + private static final AtomicBoolean tableCoprocessorLoaded = new AtomicBoolean(); + + public static class SystemCoprocessor implements Coprocessor { + @Override + public void start(CoprocessorEnvironment env) throws IOException { + systemCoprocessorLoaded.set(true); + } + + @Override + public void stop(CoprocessorEnvironment env) throws IOException { } + } + + public static class TableCoprocessor implements Coprocessor { + @Override + public void start(CoprocessorEnvironment env) throws IOException { + tableCoprocessorLoaded.set(true); + } + + @Override + public void stop(CoprocessorEnvironment env) throws IOException { } + } + + @Test + public void testRegionCoprocessorHostDefaults() throws Exception { + Configuration conf = new Configuration(CONF); + HRegion region = 
mock(HRegion.class); + when(region.getRegionInfo()).thenReturn(REGIONINFO); + when(region.getTableDesc()).thenReturn(TABLEDESC); + RegionServerServices rsServices = mock(RegionServerServices.class); + systemCoprocessorLoaded.set(false); + tableCoprocessorLoaded.set(false); + new RegionCoprocessorHost(region, rsServices, conf); + assertEquals("System coprocessors loading default was not honored", + systemCoprocessorLoaded.get(), + CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED); + assertEquals("Table coprocessors loading default was not honored", + tableCoprocessorLoaded.get(), + CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED && + CoprocessorHost.DEFAULT_USER_COPROCESSORS_ENABLED); + } + + @Test + public void testRegionServerCoprocessorHostDefaults() throws Exception { + Configuration conf = new Configuration(CONF); + RegionServerServices rsServices = mock(RegionServerServices.class); + systemCoprocessorLoaded.set(false); + new RegionServerCoprocessorHost(rsServices, conf); + assertEquals("System coprocessors loading default was not honored", + systemCoprocessorLoaded.get(), + CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED); + } + + @Test + public void testMasterCoprocessorHostDefaults() throws Exception { + Configuration conf = new Configuration(CONF); + MasterServices masterServices = mock(MasterServices.class); + systemCoprocessorLoaded.set(false); + new MasterCoprocessorHost(masterServices, conf); + assertEquals("System coprocessors loading default was not honored", + systemCoprocessorLoaded.get(), + CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED); + } + + @Test + public void testRegionCoprocessorHostAllDisabled() throws Exception { + Configuration conf = new Configuration(CONF); + conf.setBoolean(CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, false); + HRegion region = mock(HRegion.class); + when(region.getRegionInfo()).thenReturn(REGIONINFO); + when(region.getTableDesc()).thenReturn(TABLEDESC); + RegionServerServices rsServices = mock(RegionServerServices.class); + systemCoprocessorLoaded.set(false); + tableCoprocessorLoaded.set(false); + new RegionCoprocessorHost(region, rsServices, conf); + assertFalse("System coprocessors should not have been loaded", + systemCoprocessorLoaded.get()); + assertFalse("Table coprocessors should not have been loaded", + tableCoprocessorLoaded.get()); + } + + @Test + public void testRegionCoprocessorHostTableLoadingDisabled() throws Exception { + Configuration conf = new Configuration(CONF); + conf.setBoolean(CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, true); // if defaults change + conf.setBoolean(CoprocessorHost.USER_COPROCESSORS_ENABLED_CONF_KEY, false); + HRegion region = mock(HRegion.class); + when(region.getRegionInfo()).thenReturn(REGIONINFO); + when(region.getTableDesc()).thenReturn(TABLEDESC); + RegionServerServices rsServices = mock(RegionServerServices.class); + systemCoprocessorLoaded.set(false); + tableCoprocessorLoaded.set(false); + new RegionCoprocessorHost(region, rsServices, conf); + assertTrue("System coprocessors should have been loaded", + systemCoprocessorLoaded.get()); + assertFalse("Table coprocessors should not have been loaded", + tableCoprocessorLoaded.get()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java index c04499d3322..a3e0c9162f7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.coprocessor.Batch; @@ -72,7 +71,7 @@ public class TestCoprocessorEndpoint { private static final Log LOG = LogFactory.getLog(TestCoprocessorEndpoint.class); private static final TableName TEST_TABLE = - TableName.valueOf("TestTable"); + TableName.valueOf("TestCoprocessorEndpoint"); private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily"); private static final byte[] TEST_QUALIFIER = Bytes.toBytes("TestQualifier"); private static byte[] ROW = Bytes.toBytes("testRow"); @@ -99,12 +98,11 @@ public class TestCoprocessorEndpoint { desc.addFamily(new HColumnDescriptor(TEST_FAMILY)); admin.createTable(desc, new byte[][]{ROWS[rowSeperator1], ROWS[rowSeperator2]}); util.waitUntilAllRegionsAssigned(TEST_TABLE); - admin.close(); Table table = util.getConnection().getTable(TEST_TABLE); for (int i = 0; i < ROWSIZE; i++) { Put put = new Put(ROWS[i]); - put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i)); + put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i)); table.put(put); } table.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java index ce76e8a4523..a8b545647a7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java @@ -54,10 +54,13 @@ import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.ScanType; +import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.hadoop.hbase.regionserver.SplitTransaction; +import org.apache.hadoop.hbase.regionserver.SplitTransactionFactory; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.testclassification.CoprocessorTests; @@ -90,20 +93,21 @@ public class TestCoprocessorInterface { } @Override - public boolean next(List result, int limit) throws IOException { - return delegate.next(result, limit); + public boolean next(List result, ScannerContext scannerContext) + throws IOException { + return delegate.next(result, scannerContext); } @Override - public boolean nextRaw(List result) + public boolean nextRaw(List result) throws IOException { return delegate.nextRaw(result); } @Override - public boolean nextRaw(List result, int limit) + public boolean nextRaw(List result, ScannerContext context) throws IOException { - return delegate.nextRaw(result, limit); + return delegate.nextRaw(result, context); } @Override @@ -135,6 +139,12 @@ public class TestCoprocessorInterface { public long getMvccReadPoint() { return 
delegate.getMvccReadPoint(); } + + @Override + public int getBatch() { + return delegate.getBatch(); + } + } public static class CoprocessorImpl extends BaseRegionObserver { @@ -214,7 +224,7 @@ public class TestCoprocessorInterface { preSplitWithSplitRowCalled = true; } @Override - public void postSplit(ObserverContext e, HRegion l, HRegion r) { + public void postSplit(ObserverContext e, Region l, Region r) { postSplitCalled = true; } @@ -280,20 +290,19 @@ public class TestCoprocessorInterface { byte [][] families = { fam1, fam2, fam3 }; Configuration hc = initSplit(); - HRegion region = initHRegion(tableName, name.getMethodName(), hc, + Region region = initHRegion(tableName, name.getMethodName(), hc, new Class[]{}, families); for (int i = 0; i < 3; i++) { HBaseTestCase.addContent(region, fam3); - region.flushcache(); + region.flush(true); } - region.compactStores(); - - byte [] splitRow = region.checkSplit(); + region.compact(false); + byte [] splitRow = ((HRegion)region).checkSplit(); assertNotNull(splitRow); - HRegion [] regions = split(region, splitRow); + Region [] regions = split(region, splitRow); for (int i = 0; i < regions.length; i++) { regions[i] = reopenRegion(regions[i], CoprocessorImpl.class, CoprocessorII.class); } @@ -319,7 +328,7 @@ public class TestCoprocessorInterface { // now have all Environments fail for (int i = 0; i < regions.length; i++) { try { - byte [] r = regions[i].getStartKey(); + byte [] r = regions[i].getRegionInfo().getStartKey(); if (r == null || r.length <= 0) { // Its the start row. Can't ask for null. Ask for minimal key instead. r = new byte [] {0}; @@ -359,19 +368,19 @@ public class TestCoprocessorInterface { byte [][] families = { fam1, fam2, fam3 }; Configuration hc = initSplit(); - HRegion region = initHRegion(tableName, name.getMethodName(), hc, + Region region = initHRegion(tableName, name.getMethodName(), hc, new Class[]{CoprocessorImpl.class}, families); for (int i = 0; i < 3; i++) { HBaseTestCase.addContent(region, fam3); - region.flushcache(); + region.flush(true); } - region.compactStores(); + region.compact(false); - byte [] splitRow = region.checkSplit(); + byte [] splitRow = ((HRegion)region).checkSplit(); assertNotNull(splitRow); - HRegion [] regions = split(region, splitRow); + Region [] regions = split(region, splitRow); for (int i = 0; i < regions.length; i++) { regions[i] = reopenRegion(regions[i], CoprocessorImpl.class); } @@ -406,10 +415,10 @@ public class TestCoprocessorInterface { } } - HRegion reopenRegion(final HRegion closedRegion, Class ... implClasses) + Region reopenRegion(final Region closedRegion, Class ... implClasses) throws IOException { //HRegionInfo info = new HRegionInfo(tableName, null, null, false); - HRegion r = HRegion.openHRegion(closedRegion, null); + Region r = HRegion.openHRegion(closedRegion, null); // this following piece is a hack. currently a coprocessorHost // is secretly loaded at OpenRegionHandler. we don't really @@ -417,7 +426,7 @@ public class TestCoprocessorInterface { // and set it to region. 
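// [Editorial aside: illustration only, not part of the patch under review.]
// The TestCoprocessorInterface hunks above move the test from the concrete HRegion class to the
// Region interface: flushcache() becomes flush(true), compactStores() becomes compact(false), and
// members that stayed HRegion-only (checkSplit(), setCoprocessorHost()) are reached through a cast.
// A minimal sketch of the resulting calling pattern, using only calls that appear in this diff;
// variable names are taken from the surrounding test code:
//
//   Region region = initHRegion(tableName, name.getMethodName(), conf, new Class[]{}, families);
//   region.flush(true);                                   // was region.flushcache()
//   region.compact(false);                                // was region.compactStores()
//   byte[] splitRow = ((HRegion) region).checkSplit();    // not on the Region interface, hence the cast
//   ((HRegion) region).setCoprocessorHost(host);          // likewise reached via a cast in the test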
Configuration conf = TEST_UTIL.getConfiguration(); RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf); - r.setCoprocessorHost(host); + ((HRegion)r).setCoprocessorHost(host); for (Class implClass : implClasses) { host.load(implClass, Coprocessor.PRIORITY_USER, conf); @@ -433,7 +442,7 @@ public class TestCoprocessorInterface { return r; } - HRegion initHRegion (TableName tableName, String callingMethod, + Region initHRegion (TableName tableName, String callingMethod, Configuration conf, Class [] implClasses, byte [][] families) throws IOException { HTableDescriptor htd = new HTableDescriptor(tableName); @@ -442,11 +451,11 @@ public class TestCoprocessorInterface { } HRegionInfo info = new HRegionInfo(tableName, null, null, false); Path path = new Path(DIR + callingMethod); - HRegion r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd); + Region r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd); // this following piece is a hack. RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf); - r.setCoprocessorHost(host); + ((HRegion)r).setCoprocessorHost(host); for (Class implClass : implClasses) { host.load(implClass, Coprocessor.PRIORITY_USER, conf); @@ -480,12 +489,11 @@ public class TestCoprocessorInterface { return TEST_UTIL.getConfiguration(); } - private HRegion [] split(final HRegion r, final byte [] splitRow) - throws IOException { + private Region [] split(final Region r, final byte [] splitRow) throws IOException { + Region[] regions = new Region[2]; - HRegion[] regions = new HRegion[2]; - - SplitTransaction st = new SplitTransaction(r, splitRow); + SplitTransaction st = new SplitTransactionFactory(TEST_UTIL.getConfiguration()) + .create(r, splitRow); int i = 0; if (!st.prepare()) { @@ -494,20 +502,19 @@ public class TestCoprocessorInterface { } try { Server mockServer = Mockito.mock(Server.class); - when(mockServer.getConfiguration()).thenReturn( - TEST_UTIL.getConfiguration()); - PairOfSameType daughters = st.execute(mockServer, null); - for (HRegion each_daughter: daughters) { + when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration()); + PairOfSameType daughters = st.execute(mockServer, null); + for (Region each_daughter: daughters) { regions[i] = each_daughter; i++; } } catch (IOException ioe) { - LOG.info("Split transaction of " + r.getRegionNameAsString() + + LOG.info("Split transaction of " + r.getRegionInfo().getRegionNameAsString() + " failed:" + ioe.getMessage()); assertTrue(false); } catch (RuntimeException e) { LOG.info("Failed rollback of failed split of " + - r.getRegionNameAsString() + e.getMessage()); + r.getRegionInfo().getRegionNameAsString() + e.getMessage()); } assertTrue(i == 2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java new file mode 100644 index 00000000000..76953614797 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java @@ -0,0 +1,182 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.coprocessor; + +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.util.Map; + +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.testclassification.CoprocessorTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.ByteStringer; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.coprocessor.Batch; +import org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos; +import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.protobuf.ServiceException; + +@Category({CoprocessorTests.class, MediumTests.class}) +public class TestCoprocessorTableEndpoint { + + private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily"); + private static final byte[] TEST_QUALIFIER = Bytes.toBytes("TestQualifier"); + private static final byte[] ROW = Bytes.toBytes("testRow"); + private static final int ROWSIZE = 20; + private static final int rowSeperator1 = 5; + private static final int rowSeperator2 = 12; + private static final byte[][] ROWS = makeN(ROW, ROWSIZE); + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + @BeforeClass + public static void setupBeforeClass() throws Exception { + TEST_UTIL.startMiniCluster(2); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testCoprocessorTableEndpoint() throws Throwable { + final TableName tableName = TableName.valueOf("testCoprocessorTableEndpoint"); + + HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor(TEST_FAMILY)); + desc.addCoprocessor(org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName()); + + createTable(desc); + verifyTable(tableName); + } + + @Test + public void testDynamicCoprocessorTableEndpoint() throws Throwable { + final TableName tableName = TableName.valueOf("testDynamicCoprocessorTableEndpoint"); + + HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor(TEST_FAMILY)); + + createTable(desc); + + desc.addCoprocessor(org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName()); + updateTable(desc); + + verifyTable(tableName); + } + + private static byte[][] makeN(byte[] base, int n) { + byte[][] ret = new byte[n][]; + for (int i = 0; i < n; i++) { + ret[i] = Bytes.add(base, Bytes.toBytes(String.format("%02d", i))); + } + return ret; + } + + private static Map sum(final Table table, final byte [] family, + final byte [] qualifier, final 
byte [] start, final byte [] end) + throws ServiceException, Throwable { + return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, + start, end, + new Batch.Call() { + @Override + public Long call(ColumnAggregationProtos.ColumnAggregationService instance) + throws IOException { + BlockingRpcCallback rpcCallback = + new BlockingRpcCallback(); + ColumnAggregationProtos.SumRequest.Builder builder = + ColumnAggregationProtos.SumRequest.newBuilder(); + builder.setFamily(ByteStringer.wrap(family)); + if (qualifier != null && qualifier.length > 0) { + builder.setQualifier(ByteStringer.wrap(qualifier)); + } + instance.sum(null, builder.build(), rpcCallback); + return rpcCallback.get().getSum(); + } + }); + } + + private static final void createTable(HTableDescriptor desc) throws Exception { + Admin admin = TEST_UTIL.getHBaseAdmin(); + admin.createTable(desc, new byte[][]{ROWS[rowSeperator1], ROWS[rowSeperator2]}); + TEST_UTIL.waitUntilAllRegionsAssigned(desc.getTableName()); + Table table = TEST_UTIL.getConnection().getTable(desc.getTableName()); + try { + for (int i = 0; i < ROWSIZE; i++) { + Put put = new Put(ROWS[i]); + put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i)); + table.put(put); + } + } finally { + table.close(); + } + } + + private static void updateTable(HTableDescriptor desc) throws Exception { + Admin admin = TEST_UTIL.getHBaseAdmin(); + admin.disableTable(desc.getTableName()); + admin.modifyTable(desc.getTableName(), desc); + admin.enableTable(desc.getTableName()); + } + + private static final void verifyTable(TableName tableName) throws Throwable { + Table table = TEST_UTIL.getConnection().getTable(tableName); + try { + Map results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], + ROWS[ROWS.length-1]); + int sumResult = 0; + int expectedResult = 0; + for (Map.Entry e : results.entrySet()) { + sumResult += e.getValue(); + } + for (int i = 0; i < ROWSIZE; i++) { + expectedResult += i; + } + assertEquals("Invalid result", expectedResult, sumResult); + + // scan: for region 2 and region 3 + results.clear(); + results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], ROWS[ROWS.length-1]); + sumResult = 0; + expectedResult = 0; + for (Map.Entry e : results.entrySet()) { + sumResult += e.getValue(); + } + for (int i = rowSeperator1; i < ROWSIZE; i++) { + expectedResult += i; + } + assertEquals("Invalid result", expectedResult, sumResult); + } finally { + table.close(); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java index 317707a6861..8efbfbf091a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -86,7 +85,7 @@ public class TestHTableWrapper { static class DummyRegionObserver extends BaseRegionObserver { } - private HTableInterface hTableInterface; + private Table hTableInterface; private Table table; @BeforeClass @@ -144,10 +143,8 @@ public class 
TestHTableWrapper { private void checkHTableInterfaceMethods() throws Exception { checkConf(); checkNameAndDescriptor(); - checkAutoFlush(); checkBufferSize(); checkExists(); - checkGetRowOrBefore(); checkAppend(); checkPutsAndDeletes(); checkCheckAndPut(); @@ -159,7 +156,6 @@ public class TestHTableWrapper { checkMutateRow(); checkResultScanner(); - hTableInterface.flushCommits(); hTableInterface.close(); } @@ -174,15 +170,6 @@ public class TestHTableWrapper { assertEquals(table.getTableDescriptor(), hTableInterface.getTableDescriptor()); } - private void checkAutoFlush() { - boolean initialAutoFlush = hTableInterface.isAutoFlush(); - hTableInterface.setAutoFlush(false); - assertFalse(hTableInterface.isAutoFlush()); - hTableInterface.setAutoFlush(true); - assertTrue(hTableInterface.isAutoFlush()); - hTableInterface.setAutoFlush(initialAutoFlush); - } - private void checkBufferSize() throws IOException { long initialWriteBufferSize = hTableInterface.getWriteBufferSize(); hTableInterface.setWriteBufferSize(12345L); @@ -194,19 +181,12 @@ public class TestHTableWrapper { boolean ex = hTableInterface.exists(new Get(ROW_A).addColumn(TEST_FAMILY, qualifierCol1)); assertTrue(ex); - Boolean[] exArray = hTableInterface.exists(Arrays.asList(new Get[] { - new Get(ROW_A).addColumn(TEST_FAMILY, qualifierCol1), - new Get(ROW_B).addColumn(TEST_FAMILY, qualifierCol1), - new Get(ROW_C).addColumn(TEST_FAMILY, qualifierCol1), - new Get(Bytes.toBytes("does not exist")).addColumn(TEST_FAMILY, qualifierCol1), })); - assertArrayEquals(new Boolean[] { Boolean.TRUE, Boolean.TRUE, Boolean.TRUE, Boolean.FALSE }, - exArray); - } - - @SuppressWarnings("deprecation") - private void checkGetRowOrBefore() throws IOException { - Result rowOrBeforeResult = hTableInterface.getRowOrBefore(ROW_A, TEST_FAMILY); - assertArrayEquals(ROW_A, rowOrBeforeResult.getRow()); + boolean[] exArray = hTableInterface.existsAll(Arrays.asList(new Get[]{ + new Get(ROW_A).addColumn(TEST_FAMILY, qualifierCol1), + new Get(ROW_B).addColumn(TEST_FAMILY, qualifierCol1), + new Get(ROW_C).addColumn(TEST_FAMILY, qualifierCol1), + new Get(Bytes.toBytes("does not exist")).addColumn(TEST_FAMILY, qualifierCol1),})); + assertTrue(Arrays.equals(new boolean[]{true, true, true, false}, exArray)); } private void checkAppend() throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java index 094555eb100..f5ea7484917 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java @@ -63,8 +63,10 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.junit.AfterClass; import org.junit.BeforeClass; +import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; /** * Tests invocation of the {@link org.apache.hadoop.hbase.coprocessor.MasterObserver} @@ -75,6 +77,7 @@ public class TestMasterObserver { private static final Log LOG = LogFactory.getLog(TestMasterObserver.class); public static CountDownLatch tableCreationLatch = new CountDownLatch(1); + public static CountDownLatch tableDeletionLatch = new CountDownLatch(1); public static class CPMasterObserver implements MasterObserver { @@ -874,6 +877,7 @@ public class TestMasterObserver { ObserverContext ctx, TableName tableName) 
throws IOException { postDeleteTableHandlerCalled = true; + tableDeletionLatch.countDown(); } public boolean wasDeleteTableHandlerCalled() { @@ -1154,11 +1158,10 @@ public class TestMasterObserver { private static HBaseTestingUtility UTIL = new HBaseTestingUtility(); private static byte[] TEST_SNAPSHOT = Bytes.toBytes("observed_snapshot"); - private static TableName TEST_TABLE = TableName.valueOf("observed_table"); private static TableName TEST_CLONE = TableName.valueOf("observed_clone"); private static byte[] TEST_FAMILY = Bytes.toBytes("fam1"); private static byte[] TEST_FAMILY2 = Bytes.toBytes("fam2"); - private static byte[] TEST_FAMILY3 = Bytes.toBytes("fam3"); + @Rule public TestName name = new TestName(); @BeforeClass public static void setupBeforeClass() throws Exception { @@ -1179,7 +1182,7 @@ public class TestMasterObserver { UTIL.shutdownMiniCluster(); } - @Test + @Test (timeout=180000) public void testStarted() throws Exception { MiniHBaseCluster cluster = UTIL.getHBaseCluster(); @@ -1199,10 +1202,10 @@ public class TestMasterObserver { cp.wasStartMasterCalled()); } - @Test + @Test (timeout=180000) public void testTableOperations() throws Exception { MiniHBaseCluster cluster = UTIL.getHBaseCluster(); - + final TableName tableName = TableName.valueOf(name.getMethodName()); HMaster master = cluster.getMaster(); MasterCoprocessorHost host = master.getMasterCoprocessorHost(); CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor( @@ -1212,7 +1215,7 @@ public class TestMasterObserver { assertFalse("No table created yet", cp.wasCreateTableCalled()); // create a table - HTableDescriptor htd = new HTableDescriptor(TEST_TABLE); + HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(TEST_FAMILY)); Admin admin = UTIL.getHBaseAdmin(); @@ -1227,8 +1230,8 @@ public class TestMasterObserver { cp.wasCreateTableHandlerCalled()); tableCreationLatch = new CountDownLatch(1); - admin.disableTable(TEST_TABLE); - assertTrue(admin.isTableDisabled(TEST_TABLE)); + admin.disableTable(tableName); + assertTrue(admin.isTableDisabled(tableName)); // preDisableTable can't bypass default action. assertTrue("Coprocessor should have been called on table disable", cp.wasDisableTableCalled()); @@ -1237,45 +1240,45 @@ public class TestMasterObserver { // enable assertFalse(cp.wasEnableTableCalled()); - admin.enableTable(TEST_TABLE); - assertTrue(admin.isTableEnabled(TEST_TABLE)); + admin.enableTable(tableName); + assertTrue(admin.isTableEnabled(tableName)); // preEnableTable can't bypass default action. assertTrue("Coprocessor should have been called on table enable", cp.wasEnableTableCalled()); assertTrue("Enable table handler should be called.", cp.wasEnableTableHandlerCalled()); - admin.disableTable(TEST_TABLE); - assertTrue(admin.isTableDisabled(TEST_TABLE)); + admin.disableTable(tableName); + assertTrue(admin.isTableDisabled(tableName)); // modify table htd.setMaxFileSize(512 * 1024 * 1024); - modifyTableSync(admin, TEST_TABLE, htd); + modifyTableSync(admin, tableName, htd); // preModifyTable can't bypass default action. 
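// [Editorial aside: illustration only, not part of the patch under review.]
// In TestMasterObserver the shared TEST_TABLE constant is replaced by a per-test table name taken
// from JUnit's TestName rule, and table deletion is synchronized with the coprocessor through the
// new tableDeletionLatch that postDeleteTableHandler() counts down, since admin.deleteTable() is not
// synchronous with the post-hook. A condensed sketch of the two pieces as they appear in this diff:
//
//   @Rule public TestName name = new TestName();
//   ...
//   final TableName tableName = TableName.valueOf(name.getMethodName());
//   ...
//   tableDeletionLatch = new CountDownLatch(1);
//   admin.deleteTable(tableName);
//   tableDeletionLatch.await();   // released when postDeleteTableHandler() calls countDown()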
assertTrue("Test table should have been modified", cp.wasModifyTableCalled()); // add a column family - admin.addColumn(TEST_TABLE, new HColumnDescriptor(TEST_FAMILY2)); + admin.addColumn(tableName, new HColumnDescriptor(TEST_FAMILY2)); assertTrue("New column family shouldn't have been added to test table", cp.preAddColumnCalledOnly()); // modify a column family HColumnDescriptor hcd1 = new HColumnDescriptor(TEST_FAMILY2); hcd1.setMaxVersions(25); - admin.modifyColumn(TEST_TABLE, hcd1); + admin.modifyColumn(tableName, hcd1); assertTrue("Second column family should be modified", cp.preModifyColumnCalledOnly()); // truncate table - admin.truncateTable(TEST_TABLE, false); + admin.truncateTable(tableName, false); // delete table - admin.disableTable(TEST_TABLE); - assertTrue(admin.isTableDisabled(TEST_TABLE)); - admin.deleteTable(TEST_TABLE); + admin.disableTable(tableName); + assertTrue(admin.isTableDisabled(tableName)); + deleteTable(admin, tableName); assertFalse("Test table should have been deleted", - admin.tableExists(TEST_TABLE)); + admin.tableExists(tableName)); // preDeleteTable can't bypass default action. assertTrue("Coprocessor should have been called on table delete", cp.wasDeleteTableCalled()); @@ -1297,8 +1300,8 @@ public class TestMasterObserver { // disable assertFalse(cp.wasDisableTableCalled()); assertFalse(cp.wasDisableTableHandlerCalled()); - admin.disableTable(TEST_TABLE); - assertTrue(admin.isTableDisabled(TEST_TABLE)); + admin.disableTable(tableName); + assertTrue(admin.isTableDisabled(tableName)); assertTrue("Coprocessor should have been called on table disable", cp.wasDisableTableCalled()); assertTrue("Disable table handler should be called.", @@ -1306,11 +1309,11 @@ public class TestMasterObserver { // modify table htd.setMaxFileSize(512 * 1024 * 1024); - modifyTableSync(admin, TEST_TABLE, htd); + modifyTableSync(admin, tableName, htd); assertTrue("Test table should have been modified", cp.wasModifyTableCalled()); // add a column family - admin.addColumn(TEST_TABLE, new HColumnDescriptor(TEST_FAMILY2)); + admin.addColumn(tableName, new HColumnDescriptor(TEST_FAMILY2)); assertTrue("New column family should have been added to test table", cp.wasAddColumnCalled()); assertTrue("Add column handler should be called.", @@ -1319,7 +1322,7 @@ public class TestMasterObserver { // modify a column family HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY2); hcd.setMaxVersions(25); - admin.modifyColumn(TEST_TABLE, hcd); + admin.modifyColumn(tableName, hcd); assertTrue("Second column family should be modified", cp.wasModifyColumnCalled()); assertTrue("Modify table handler should be called.", @@ -1328,23 +1331,23 @@ public class TestMasterObserver { // enable assertFalse(cp.wasEnableTableCalled()); assertFalse(cp.wasEnableTableHandlerCalled()); - admin.enableTable(TEST_TABLE); - assertTrue(admin.isTableEnabled(TEST_TABLE)); + admin.enableTable(tableName); + assertTrue(admin.isTableEnabled(tableName)); assertTrue("Coprocessor should have been called on table enable", cp.wasEnableTableCalled()); assertTrue("Enable table handler should be called.", cp.wasEnableTableHandlerCalled()); // disable again - admin.disableTable(TEST_TABLE); - assertTrue(admin.isTableDisabled(TEST_TABLE)); + admin.disableTable(tableName); + assertTrue(admin.isTableDisabled(tableName)); // delete column assertFalse("No column family deleted yet", cp.wasDeleteColumnCalled()); assertFalse("Delete table column handler should not be called.", cp.wasDeleteColumnHandlerCalled()); - 
admin.deleteColumn(TEST_TABLE, TEST_FAMILY2); - HTableDescriptor tableDesc = admin.getTableDescriptor(TEST_TABLE); + admin.deleteColumn(tableName, TEST_FAMILY2); + HTableDescriptor tableDesc = admin.getTableDescriptor(tableName); assertNull("'"+Bytes.toString(TEST_FAMILY2)+"' should have been removed", tableDesc.getFamily(TEST_FAMILY2)); assertTrue("Coprocessor should have been called on column delete", @@ -1356,17 +1359,18 @@ public class TestMasterObserver { assertFalse("No table deleted yet", cp.wasDeleteTableCalled()); assertFalse("Delete table handler should not be called.", cp.wasDeleteTableHandlerCalled()); - admin.deleteTable(TEST_TABLE); + deleteTable(admin, tableName); assertFalse("Test table should have been deleted", - admin.tableExists(TEST_TABLE)); + admin.tableExists(tableName)); assertTrue("Coprocessor should have been called on table delete", cp.wasDeleteTableCalled()); assertTrue("Delete table handler should be called.", cp.wasDeleteTableHandlerCalled()); } - @Test + @Test (timeout=180000) public void testSnapshotOperations() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); MiniHBaseCluster cluster = UTIL.getHBaseCluster(); HMaster master = cluster.getMaster(); MasterCoprocessorHost host = master.getMasterCoprocessorHost(); @@ -1375,7 +1379,7 @@ public class TestMasterObserver { cp.resetStates(); // create a table - HTableDescriptor htd = new HTableDescriptor(TEST_TABLE); + HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(TEST_FAMILY)); Admin admin = UTIL.getHBaseAdmin(); @@ -1384,14 +1388,14 @@ public class TestMasterObserver { tableCreationLatch.await(); tableCreationLatch = new CountDownLatch(1); - admin.disableTable(TEST_TABLE); - assertTrue(admin.isTableDisabled(TEST_TABLE)); + admin.disableTable(tableName); + assertTrue(admin.isTableDisabled(tableName)); try { // Test snapshot operation assertFalse("Coprocessor should not have been called yet", cp.wasSnapshotCalled()); - admin.snapshot(TEST_SNAPSHOT, TEST_TABLE); + admin.snapshot(TEST_SNAPSHOT, tableName); assertTrue("Coprocessor should have been called on snapshot", cp.wasSnapshotCalled()); @@ -1407,8 +1411,8 @@ public class TestMasterObserver { assertFalse("Coprocessor restore should not have been called on snapshot clone", cp.wasRestoreSnapshotCalled()); admin.disableTable(TEST_CLONE); - assertTrue(admin.isTableDisabled(TEST_TABLE)); - admin.deleteTable(TEST_CLONE); + assertTrue(admin.isTableDisabled(tableName)); + deleteTable(admin, TEST_CLONE); // Test restore operation cp.resetStates(); @@ -1422,11 +1426,11 @@ public class TestMasterObserver { assertTrue("Coprocessor should have been called on snapshot delete", cp.wasDeleteSnapshotCalled()); } finally { - admin.deleteTable(TEST_TABLE); + deleteTable(admin, tableName); } } - @Test + @Test (timeout=180000) public void testNamespaceOperations() throws Exception { MiniHBaseCluster cluster = UTIL.getHBaseCluster(); String testNamespace = "observed_ns"; @@ -1513,8 +1517,9 @@ public class TestMasterObserver { } } - @Test + @Test (timeout=180000) public void testRegionTransitionOperations() throws Exception { + final TableName tableName = TableName.valueOf(name.getMethodName()); MiniHBaseCluster cluster = UTIL.getHBaseCluster(); HMaster master = cluster.getMaster(); @@ -1524,10 +1529,10 @@ public class TestMasterObserver { cp.enableBypass(false); cp.resetStates(); - HTable table = UTIL.createMultiRegionTable(TEST_TABLE, TEST_FAMILY); + HTable table = 
UTIL.createMultiRegionTable(tableName, TEST_FAMILY); try { - UTIL.waitUntilAllRegionsAssigned(TEST_TABLE); + UTIL.waitUntilAllRegionsAssigned(tableName); NavigableMap regions = table.getRegionLocations(); Map.Entry firstGoodPair = null; @@ -1601,7 +1606,9 @@ public class TestMasterObserver { assertTrue("Coprocessor should be called on region rebalancing", cp.wasBalanceCalled()); } finally { - UTIL.deleteTable(TEST_TABLE); + Admin admin = UTIL.getHBaseAdmin(); + admin.disableTable(tableName); + deleteTable(admin, tableName); } } @@ -1615,7 +1622,7 @@ public class TestMasterObserver { } } - @Test + @Test (timeout=180000) public void testTableDescriptorsEnumeration() throws Exception { MiniHBaseCluster cluster = UTIL.getHBaseCluster(); @@ -1633,7 +1640,7 @@ public class TestMasterObserver { cp.wasGetTableDescriptorsCalled()); } - @Test + @Test (timeout=180000) public void testTableNamesEnumeration() throws Exception { MiniHBaseCluster cluster = UTIL.getHBaseCluster(); @@ -1648,4 +1655,13 @@ public class TestMasterObserver { assertTrue("Coprocessor should be called on table names request", cp.wasGetTableNamesCalled()); } + + private void deleteTable(Admin admin, TableName tableName) throws Exception { + // NOTE: We need a latch because admin is not sync, + // so the postOp coprocessor method may be called after the admin operation returned. + tableDeletionLatch = new CountDownLatch(1); + admin.deleteTable(tableName); + tableDeletionLatch.await(); + tableDeletionLatch = new CountDownLatch(1); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java index 6895fbea084..454a61d65ca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java @@ -66,8 +66,10 @@ import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; +import org.apache.hadoop.hbase.regionserver.NoLimitScannerContext; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; import org.apache.hadoop.hbase.regionserver.ScanType; +import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.testclassification.CoprocessorTests; @@ -112,7 +114,7 @@ public class TestRegionObserverInterface { util.shutdownMiniCluster(); } - @Test + @Test (timeout=300000) public void testRegionObserver() throws IOException { TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRegionObserver"); // recreate table every time in order to reset the status of the @@ -176,7 +178,7 @@ public class TestRegionObserverInterface { new Integer[] {1, 1, 1, 1}); } - @Test + @Test (timeout=300000) public void testRowMutation() throws IOException { TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRowMutation"); Table table = util.createTable(tableName, new byte[][] {A, B, C}); @@ -213,7 +215,7 @@ public class TestRegionObserverInterface { } } - @Test + @Test (timeout=300000) public void testIncrementHook() throws IOException { TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + 
".testIncrementHook"); Table table = util.createTable(tableName, new byte[][] {A, B, C}); @@ -240,7 +242,7 @@ public class TestRegionObserverInterface { } } - @Test + @Test (timeout=300000) public void testCheckAndPutHooks() throws IOException { TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndPutHooks"); @@ -268,7 +270,7 @@ public class TestRegionObserverInterface { } } - @Test + @Test (timeout=300000) public void testCheckAndDeleteHooks() throws IOException { TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testCheckAndDeleteHooks"); @@ -298,7 +300,7 @@ public class TestRegionObserverInterface { } } - @Test + @Test (timeout=300000) public void testAppendHook() throws IOException { TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testAppendHook"); Table table = util.createTable(tableName, new byte[][] {A, B, C}); @@ -325,7 +327,7 @@ public class TestRegionObserverInterface { } } - @Test + @Test (timeout=300000) // HBase-3583 public void testHBase3583() throws IOException { TableName tableName = @@ -377,7 +379,7 @@ public class TestRegionObserverInterface { table.close(); } - @Test + @Test (timeout=300000) // HBase-3758 public void testHBase3758() throws IOException { TableName tableName = @@ -434,16 +436,16 @@ public class TestRegionObserverInterface { return new InternalScanner() { @Override public boolean next(List results) throws IOException { - return next(results, -1); + return next(results, NoLimitScannerContext.getInstance()); } @Override - public boolean next(List results, int limit) - throws IOException{ + public boolean next(List results, ScannerContext scannerContext) + throws IOException { List internalResults = new ArrayList(); boolean hasMore; do { - hasMore = scanner.next(internalResults, limit); + hasMore = scanner.next(internalResults, scannerContext); if (!internalResults.isEmpty()) { long row = Bytes.toLong(CellUtil.cloneValue(internalResults.get(0))); if (row % 2 == 0) { @@ -483,7 +485,7 @@ public class TestRegionObserverInterface { * Tests overriding compaction handling via coprocessor hooks * @throws Exception */ - @Test + @Test (timeout=300000) public void testCompactionOverride() throws Exception { TableName compactTable = TableName.valueOf("TestCompactionOverride"); Admin admin = util.getHBaseAdmin(); @@ -554,7 +556,7 @@ public class TestRegionObserverInterface { table.close(); } - @Test + @Test (timeout=300000) public void bulkLoadHFileTest() throws Exception { String testName = TestRegionObserverInterface.class.getName()+".bulkLoadHFileTest"; TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".bulkLoadHFileTest"); @@ -587,7 +589,7 @@ public class TestRegionObserverInterface { } } - @Test + @Test (timeout=300000) public void testRecovery() throws Exception { LOG.info(TestRegionObserverInterface.class.getName() +".testRecovery"); TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRecovery"); @@ -637,7 +639,7 @@ public class TestRegionObserverInterface { } } - @Test + @Test (timeout=300000) public void testLegacyRecovery() throws Exception { LOG.info(TestRegionObserverInterface.class.getName() +".testLegacyRecovery"); TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testLegacyRecovery"); @@ -687,7 +689,7 @@ public class TestRegionObserverInterface { } } - @Test + @Test (timeout=300000) public void testPreWALRestoreSkip() throws Exception { LOG.info(TestRegionObserverInterface.class.getName() + 
".testPreWALRestoreSkip"); TableName tableName = TableName.valueOf(SimpleRegionObserver.TABLE_SKIPPED); @@ -772,5 +774,4 @@ public class TestRegionObserverInterface { writer.close(); } } - } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java index 6c7552a950b..00808bde896 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverScannerOpenHook.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.KeyValueScanner; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.ScanType; @@ -143,7 +144,7 @@ public class TestRegionObserverScannerOpenHook { } } - HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf, + Region initHRegion(byte[] tableName, String callingMethod, Configuration conf, byte[]... families) throws IOException { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); for (byte[] family : families) { @@ -170,7 +171,7 @@ public class TestRegionObserverScannerOpenHook { byte[][] FAMILIES = new byte[][] { A }; Configuration conf = HBaseConfiguration.create(); - HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES); + Region region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES); RegionCoprocessorHost h = region.getCoprocessorHost(); h.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf); h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf); @@ -195,7 +196,7 @@ public class TestRegionObserverScannerOpenHook { byte[][] FAMILIES = new byte[][] { A }; Configuration conf = HBaseConfiguration.create(); - HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES); + Region region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES); RegionCoprocessorHost h = region.getCoprocessorHost(); h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf); h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf); @@ -204,7 +205,7 @@ public class TestRegionObserverScannerOpenHook { Put put = new Put(ROW); put.add(A, A, A); region.put(put); - region.flushcache(); + region.flush(true); Get get = new Get(ROW); Result r = region.get(get); assertNull( @@ -272,10 +273,10 @@ public class TestRegionObserverScannerOpenHook { table.put(put); HRegionServer rs = UTIL.getRSForFirstRegionInTable(desc.getTableName()); - List regions = rs.getOnlineRegions(desc.getTableName()); + List regions = rs.getOnlineRegions(desc.getTableName()); assertEquals("More than 1 region serving test table with 1 row", 1, regions.size()); - HRegion region = regions.get(0); - admin.flushRegion(region.getRegionName()); + Region region = regions.get(0); + admin.flushRegion(region.getRegionInfo().getRegionName()); CountDownLatch latch = ((CompactionCompletionNotifyingRegion)region) .getCompactionStateChangeLatch(); @@ -283,7 +284,7 @@ public class TestRegionObserverScannerOpenHook { put = new Put(Bytes.toBytes("anotherrow")); put.add(A, A, A); 
table.put(put); - admin.flushRegion(region.getRegionName()); + admin.flushRegion(region.getRegionInfo().getRegionName()); // run a compaction, which normally would should get rid of the data // wait for the compaction checker to complete diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java index 2e6eabc2648..e013cbb2c5c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java @@ -36,11 +36,12 @@ import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.RegionMergeTransactionFactory; +import org.apache.hadoop.hbase.regionserver.RegionMergeTransactionImpl; import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost; import org.apache.hadoop.hbase.testclassification.CoprocessorTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -90,7 +91,7 @@ public class TestRegionServerObserver { desc.addFamily(new HColumnDescriptor(FAM)); admin.createTable(desc, new byte[][] { Bytes.toBytes("row") }); assertFalse(regionServerObserver.wasRegionMergeCalled()); - List regions = regionServer.getOnlineRegions(TableName.valueOf(TABLENAME)); + List regions = regionServer.getOnlineRegions(TableName.valueOf(TABLENAME)); admin.mergeRegions(regions.get(0).getRegionInfo().getEncodedNameAsBytes(), regions.get(1) .getRegionInfo().getEncodedNameAsBytes(), true); int regionsCount = regionServer.getOnlineRegions(TableName.valueOf(TABLENAME)).size(); @@ -110,7 +111,7 @@ public class TestRegionServerObserver { } public static class CPRegionServerObserver extends BaseRegionServerObserver { - private RegionMergeTransaction rmt = null; + private RegionMergeTransactionImpl rmt = null; private HRegion mergedRegion = null; private boolean preMergeCalled; @@ -130,20 +131,21 @@ public class TestRegionServerObserver { } @Override - public void preMerge(ObserverContext ctx, HRegion regionA, - HRegion regionB) throws IOException { + public void preMerge(ObserverContext ctx, Region regionA, + Region regionB) throws IOException { preMergeCalled = true; } @Override public void preMergeCommit(ObserverContext ctx, - HRegion regionA, HRegion regionB, List metaEntries) throws IOException { + Region regionA, Region regionB, List metaEntries) throws IOException { preMergeBeforePONRCalled = true; RegionServerCoprocessorEnvironment environment = ctx.getEnvironment(); HRegionServer rs = (HRegionServer) environment.getRegionServerServices(); - List onlineRegions = + List onlineRegions = rs.getOnlineRegions(TableName.valueOf("testRegionServerObserver_2")); - rmt = new RegionMergeTransaction(onlineRegions.get(0), onlineRegions.get(1), true); + rmt = (RegionMergeTransactionImpl) new RegionMergeTransactionFactory(rs.getConfiguration()) + .create(onlineRegions.get(0), onlineRegions.get(1), true); if (!rmt.prepare(rs)) { 
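// [Editorial aside: illustration only, not part of the patch under review.]
// The region-server observer hooks above now take the Region interface instead of HRegion, and the
// merge transaction is obtained through RegionMergeTransactionFactory rather than constructed
// directly. A condensed sketch of the replacement pattern shown in this hunk; onlineRegions and rs
// come from the surrounding test code:
//
//   RegionMergeTransactionImpl rmt = (RegionMergeTransactionImpl)
//       new RegionMergeTransactionFactory(rs.getConfiguration())
//           .create(onlineRegions.get(0), onlineRegions.get(1), true);
//   // rmt.prepare(rs) is then invoked exactly as in the surrounding hunk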
LOG.error("Prepare for the region merge of table " + onlineRegions.get(0).getTableDesc().getNameAsString() @@ -159,7 +161,7 @@ public class TestRegionServerObserver { @Override public void postMergeCommit(ObserverContext ctx, - HRegion regionA, HRegion regionB, HRegion mr) throws IOException { + Region regionA, Region regionB, Region mr) throws IOException { preMergeAfterPONRCalled = true; RegionServerCoprocessorEnvironment environment = ctx.getEnvironment(); HRegionServer rs = (HRegionServer) environment.getRegionServerServices(); @@ -168,19 +170,19 @@ public class TestRegionServerObserver { @Override public void preRollBackMerge(ObserverContext ctx, - HRegion regionA, HRegion regionB) throws IOException { + Region regionA, Region regionB) throws IOException { preRollBackMergeCalled = true; } @Override public void postRollBackMerge(ObserverContext ctx, - HRegion regionA, HRegion regionB) throws IOException { + Region regionA, Region regionB) throws IOException { postRollBackMergeCalled = true; } @Override - public void postMerge(ObserverContext c, HRegion regionA, - HRegion regionB, HRegion mergedRegion) throws IOException { + public void postMerge(ObserverContext c, Region regionA, + Region regionB, Region mergedRegion) throws IOException { postMergeCalled = true; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java index 2e51c821bc2..828842d7ae8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.filter; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; import java.io.IOException; import java.util.ArrayList; @@ -27,10 +27,17 @@ import java.util.List; import java.util.Map; import java.util.Set; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueTestUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.testclassification.FilterTests; @@ -97,7 +104,8 @@ public class TestColumnPrefixFilter { InternalScanner scanner = region.getScanner(scan); List results = new ArrayList(); - while(scanner.next(results)); + while (scanner.next(results)) + ; assertEquals(prefixMap.get(s).size(), results.size()); } } finally { @@ -162,7 +170,8 @@ public class TestColumnPrefixFilter { InternalScanner scanner = region.getScanner(scan); List results = new ArrayList(); - while(scanner.next(results)); + while (scanner.next(results)) + ; assertEquals(prefixMap.get(s).size(), results.size()); } } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java index 40a4c43734c..add549a9141 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java @@ -19,6 +19,10 @@ package org.apache.hadoop.hbase.filter; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -26,7 +30,13 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; @@ -36,14 +46,9 @@ import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.testclassification.FilterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; - import org.junit.After; import org.junit.Before; import org.junit.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - import org.junit.experimental.categories.Category; @Category({FilterTests.class, SmallTests.class}) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java index 61321dd45bd..5fcf64eed7f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java @@ -47,11 +47,12 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.filter.FilterList.Operator; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionScanner; -import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.testclassification.FilterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.wal.WAL; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -66,7 +67,7 @@ import com.google.common.base.Throwables; @Category({FilterTests.class, SmallTests.class}) public class TestFilter { private final static Log LOG = LogFactory.getLog(TestFilter.class); - private HRegion region; + private Region region; private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); // @@ -164,7 +165,7 @@ public class TestFilter { } // Flush - this.region.flushcache(); + this.region.flush(true); // Insert second half (reverse families) for(byte [] ROW : ROWS_ONE) { @@ -241,7 +242,7 @@ public class TestFilter { this.region.put(p); } // Flush - this.region.flushcache(); + this.region.flush(true); // Insert second half (reverse families) for (byte[] ROW : ROWS_THREE) { @@ -1450,7 +1451,7 @@ public class TestFilter { HTableDescriptor htd = new 
HTableDescriptor(TableName.valueOf("TestFilter")); htd.addFamily(new HColumnDescriptor(family)); HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); - HRegion testRegion = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), + Region testRegion = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd); for(int i=0; i<5; i++) { @@ -1459,7 +1460,7 @@ public class TestFilter { p.add(family, qualifier, Bytes.toBytes(String.valueOf(111+i))); testRegion.put(p); } - testRegion.flushcache(); + testRegion.flush(true); // rows starting with "b" PrefixFilter pf = new PrefixFilter(new byte[] {'b'}) ; @@ -1474,7 +1475,7 @@ public class TestFilter { InternalScanner scanner = testRegion.getScanner(s1); List results = new ArrayList(); int resultCount = 0; - while(scanner.next(results)) { + while (scanner.next(results)) { resultCount++; byte[] row = CellUtil.cloneRow(results.get(0)); LOG.debug("Found row: " + Bytes.toStringBinary(row)); @@ -1485,8 +1486,8 @@ public class TestFilter { assertEquals(2, resultCount); scanner.close(); - WAL wal = testRegion.getWAL(); - testRegion.close(); + WAL wal = ((HRegion)testRegion).getWAL(); + ((HRegion)testRegion).close(); wal.close(); } @@ -1819,7 +1820,7 @@ public class TestFilter { p.setDurability(Durability.SKIP_WAL); p.add(FAMILIES[0], QUALIFIERS_ONE[0], VALUES[0]); this.region.put(p); - this.region.flushcache(); + this.region.flush(true); // Set of KVs (page: 1; pageSize: 1) - the first set of 1 column per row KeyValue [] expectedKVs = { @@ -2010,7 +2011,7 @@ public class TestFilter { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testNestedFilterListWithSCVF")); htd.addFamily(new HColumnDescriptor(FAMILIES[0])); HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); - HRegion testRegion = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), + Region testRegion = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd); for(int i=0; i<10; i++) { Put p = new Put(Bytes.toBytes("row" + i)); @@ -2018,7 +2019,7 @@ public class TestFilter { p.add(FAMILIES[0], columnStatus, Bytes.toBytes(i%2)); testRegion.put(p); } - testRegion.flushcache(); + testRegion.flush(true); // 1. 
got rows > "row4" Filter rowFilter = new RowFilter(CompareOp.GREATER,new BinaryComparator(Bytes.toBytes("row4"))); Scan s1 = new Scan(); @@ -2094,8 +2095,8 @@ public class TestFilter { results.clear(); } assertFalse(scanner.next(results)); - WAL wal = testRegion.getWAL(); - testRegion.close(); + WAL wal = ((HRegion)testRegion).getWAL(); + ((HRegion)testRegion).close(); wal.close(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java index 5454480dfbe..a8651d83dbc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java @@ -34,10 +34,11 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; -import org.apache.hadoop.hbase.wal.WAL; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.FilterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.wal.WAL; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -60,7 +61,7 @@ public class TestInvocationRecordFilter { private static final String VALUE_PREFIX = "value"; private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private HRegion region; + private Region region; @Before public void setUp() throws Exception { @@ -78,7 +79,7 @@ public class TestInvocationRecordFilter { Bytes.toBytes(VALUE_PREFIX + i)); } this.region.put(put); - this.region.flushcache(); + this.region.flush(true); } @Test @@ -150,8 +151,8 @@ public class TestInvocationRecordFilter { @After public void tearDown() throws Exception { - WAL wal = region.getWAL(); - region.close(); + WAL wal = ((HRegion)region).getWAL(); + ((HRegion)region).close(); wal.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java index d2997afb183..7b700b705af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hbase.filter; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; import java.io.IOException; import java.util.ArrayList; @@ -27,10 +27,17 @@ import java.util.List; import java.util.Map; import java.util.Set; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueTestUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.regionserver.HRegion; import 
org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.testclassification.FilterTests; @@ -102,7 +109,8 @@ public class TestMultipleColumnPrefixFilter { scan.setFilter(filter); List results = new ArrayList(); InternalScanner scanner = region.getScanner(scan); - while(scanner.next(results)); + while (scanner.next(results)) + ; assertEquals(prefixMap.get("p").size() + prefixMap.get("q").size(), results.size()); HBaseTestingUtility.closeRegionAndWAL(region); @@ -174,7 +182,8 @@ public class TestMultipleColumnPrefixFilter { scan.setFilter(filter); List results = new ArrayList(); InternalScanner scanner = region.getScanner(scan); - while(scanner.next(results)); + while (scanner.next(results)) + ; assertEquals(prefixMap.get("p").size() + prefixMap.get("q").size(), results.size()); HBaseTestingUtility.closeRegionAndWAL(region); @@ -218,7 +227,8 @@ public class TestMultipleColumnPrefixFilter { scan1.setFilter(multiplePrefixFilter); List results1 = new ArrayList(); InternalScanner scanner1 = region.getScanner(scan1); - while(scanner1.next(results1)); + while (scanner1.next(results1)) + ; ColumnPrefixFilter singlePrefixFilter; Scan scan2 = new Scan(); @@ -228,7 +238,8 @@ public class TestMultipleColumnPrefixFilter { scan2.setFilter(singlePrefixFilter); List results2 = new ArrayList(); InternalScanner scanner2 = region.getScanner(scan1); - while(scanner2.next(results2)); + while (scanner2.next(results2)) + ; assertEquals(results1.size(), results2.size()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java index 613d1ea92aa..504350c9ffc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; import org.apache.hadoop.hbase.wal.DefaultWALProvider; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -285,7 +286,7 @@ public class TestBlockReorder { int nbTest = 0; while (nbTest < 10) { - final List regions = targetRs.getOnlineRegions(h.getName()); + final List regions = targetRs.getOnlineRegions(h.getName()); final CountDownLatch latch = new CountDownLatch(regions.size()); // listen for successful log rolls final WALActionsListener listener = new WALActionsListener.Base() { @@ -294,8 +295,8 @@ public class TestBlockReorder { latch.countDown(); } }; - for (HRegion region : regions) { - region.getWAL().registerWALActionsListener(listener); + for (Region region : regions) { + ((HRegion)region).getWAL().registerWALActionsListener(listener); } htu.getHBaseAdmin().rollWALWriter(targetRs.getServerName()); @@ -308,8 +309,8 @@ public class TestBlockReorder { "tests fail, it's probably because we should still be waiting."); Thread.currentThread().interrupt(); } - for (HRegion region : regions) { - region.getWAL().unregisterWALActionsListener(listener); + for (Region region : regions) { + ((HRegion)region).getWAL().unregisterWALActionsListener(listener); } // We need a sleep as the namenode is informed asynchronously diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java index 248b8205cdb..86687384d04 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java @@ -26,38 +26,32 @@ import java.io.Writer; import java.math.BigInteger; import java.net.URL; import java.security.GeneralSecurityException; +import java.security.InvalidKeyException; import java.security.Key; import java.security.KeyPair; import java.security.KeyPairGenerator; import java.security.KeyStore; import java.security.NoSuchAlgorithmException; -import java.security.PrivateKey; +import java.security.NoSuchProviderException; import java.security.SecureRandom; +import java.security.SignatureException; import java.security.cert.Certificate; +import java.security.cert.CertificateEncodingException; import java.security.cert.X509Certificate; import java.util.Date; import java.util.HashMap; import java.util.Map; +import javax.security.auth.x500.X500Principal; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory; import org.apache.hadoop.security.ssl.SSLFactory; - -import sun.security.x509.AlgorithmId; -import sun.security.x509.CertificateAlgorithmId; -import sun.security.x509.CertificateIssuerName; -import sun.security.x509.CertificateSerialNumber; -import sun.security.x509.CertificateSubjectName; -import sun.security.x509.CertificateValidity; -import sun.security.x509.CertificateVersion; -import sun.security.x509.CertificateX509Key; -import sun.security.x509.X500Name; -import sun.security.x509.X509CertImpl; -import sun.security.x509.X509CertInfo; +import org.bouncycastle.x509.X509V1CertificateGenerator; public class KeyStoreTestUtil { - public static String getClasspathDir(Class klass) throws Exception { + public static String getClasspathDir(Class klass) throws Exception { String file = klass.getName(); file = file.replace('.', '/') + ".class"; URL url = Thread.currentThread().getContextClassLoader().getResource(file); @@ -68,48 +62,31 @@ public class KeyStoreTestUtil { /** * Create a self-signed X.509 Certificate. - * From http://bfo.com/blog/2011/03/08/odds_and_ends_creating_a_new_x_509_certificate.html. * * @param dn the X.509 Distinguished Name, eg "CN=Test, L=London, C=GB" * @param pair the KeyPair * @param days how many days from now the Certificate is valid for * @param algorithm the signing algorithm, eg "SHA1withRSA" * @return the self-signed certificate - * @throws IOException thrown if an IO error ocurred. - * @throws GeneralSecurityException thrown if an Security error ocurred. 
*/ - public static X509Certificate generateCertificate(String dn, KeyPair pair, - int days, String algorithm) - throws GeneralSecurityException, IOException { - PrivateKey privkey = pair.getPrivate(); - X509CertInfo info = new X509CertInfo(); + public static X509Certificate generateCertificate(String dn, KeyPair pair, int days, String algorithm) + throws CertificateEncodingException, InvalidKeyException, IllegalStateException, + NoSuchProviderException, NoSuchAlgorithmException, SignatureException { Date from = new Date(); Date to = new Date(from.getTime() + days * 86400000l); - CertificateValidity interval = new CertificateValidity(from, to); BigInteger sn = new BigInteger(64, new SecureRandom()); - X500Name owner = new X500Name(dn); + KeyPair keyPair = pair; + X509V1CertificateGenerator certGen = new X509V1CertificateGenerator(); + X500Principal dnName = new X500Principal(dn); - info.set(X509CertInfo.VALIDITY, interval); - info.set(X509CertInfo.SERIAL_NUMBER, new CertificateSerialNumber(sn)); - info.set(X509CertInfo.SUBJECT, new CertificateSubjectName(owner)); - info.set(X509CertInfo.ISSUER, new CertificateIssuerName(owner)); - info.set(X509CertInfo.KEY, new CertificateX509Key(pair.getPublic())); - info - .set(X509CertInfo.VERSION, new CertificateVersion(CertificateVersion.V3)); - AlgorithmId algo = new AlgorithmId(AlgorithmId.md5WithRSAEncryption_oid); - info.set(X509CertInfo.ALGORITHM_ID, new CertificateAlgorithmId(algo)); - - // Sign the cert to identify the algorithm that's used. - X509CertImpl cert = new X509CertImpl(info); - cert.sign(privkey, algorithm); - - // Update the algorith, and resign. - algo = (AlgorithmId) cert.get(X509CertImpl.SIG_ALG); - info - .set(CertificateAlgorithmId.NAME + "." + CertificateAlgorithmId.ALGORITHM, - algo); - cert = new X509CertImpl(info); - cert.sign(privkey, algorithm); + certGen.setSerialNumber(sn); + certGen.setIssuerDN(dnName); + certGen.setNotBefore(from); + certGen.setNotAfter(to); + certGen.setSubjectDN(dnName); + certGen.setPublicKey(keyPair.getPublic()); + certGen.setSignatureAlgorithm(algorithm); + X509Certificate cert = certGen.generate(pair.getPrivate()); return cert; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestByteBufferOutputStream.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestByteBufferOutputStream.java new file mode 100644 index 00000000000..55f8dda6031 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestByteBufferOutputStream.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.io; + +import static org.junit.Assert.*; + +import java.io.IOException; +import java.nio.ByteBuffer; + +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(SmallTests.class) +public class TestByteBufferOutputStream { + @Test + public void testByteBufferReuse() throws IOException { + byte [] someBytes = Bytes.toBytes("some bytes"); + ByteBuffer bb = ByteBuffer.allocate(someBytes.length); + ByteBuffer bbToReuse = write(bb, someBytes); + bbToReuse = write(bbToReuse, Bytes.toBytes("less")); + assertTrue(bb == bbToReuse); + } + + private ByteBuffer write(final ByteBuffer bb, final byte [] bytes) throws IOException { + try (ByteBufferOutputStream bbos = new ByteBufferOutputStream(bb)) { + bbos.write(bytes); + assertTrue(Bytes.compareTo(bytes, bbos.toByteArray(0, bytes.length)) == 0); + bbos.flush(); + return bbos.getByteBuffer(); + } + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFileLink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFileLink.java index 777b3cdd10d..0da685fbd38 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFileLink.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestFileLink.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.io; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; import java.io.FileNotFoundException; @@ -46,6 +47,41 @@ import org.junit.experimental.categories.Category; */ @Category({IOTests.class, MediumTests.class}) public class TestFileLink { + + @Test + public void testEquals() { + Path p1 = new Path("/p1"); + Path p2 = new Path("/p2"); + Path p3 = new Path("/p3"); + + assertEquals(new FileLink(), new FileLink()); + assertEquals(new FileLink(p1), new FileLink(p1)); + assertEquals(new FileLink(p1, p2), new FileLink(p1, p2)); + assertEquals(new FileLink(p1, p2, p3), new FileLink(p1, p2, p3)); + + assertNotEquals(new FileLink(p1), new FileLink(p3)); + assertNotEquals(new FileLink(p1, p2), new FileLink(p1)); + assertNotEquals(new FileLink(p1, p2), new FileLink(p2)); + assertNotEquals(new FileLink(p1, p2), new FileLink(p2, p1)); // ordering important! + } + + @Test + public void testHashCode() { + Path p1 = new Path("/p1"); + Path p2 = new Path("/p2"); + Path p3 = new Path("/p3"); + + assertEquals(new FileLink().hashCode(), new FileLink().hashCode()); + assertEquals(new FileLink(p1).hashCode(), new FileLink(p1).hashCode()); + assertEquals(new FileLink(p1, p2).hashCode(), new FileLink(p1, p2).hashCode()); + assertEquals(new FileLink(p1, p2, p3).hashCode(), new FileLink(p1, p2, p3).hashCode()); + + assertNotEquals(new FileLink(p1).hashCode(), new FileLink(p3).hashCode()); + assertNotEquals(new FileLink(p1, p2).hashCode(), new FileLink(p1).hashCode()); + assertNotEquals(new FileLink(p1, p2).hashCode(), new FileLink(p2).hashCode()); + assertNotEquals(new FileLink(p1, p2).hashCode(), new FileLink(p2, p1).hashCode()); // ordering + } + /** * Test, on HDFS, that the FileLink is still readable * even when the current file gets renamed. 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java index e0874579556..5ccb20652b8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.LruBlockCache; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -113,7 +113,7 @@ public class TestEncodedSeekers { setBlocksize(BLOCK_SIZE). setBloomFilterType(BloomType.NONE). setCompressTags(compressTags); - HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd); + Region region = testUtil.createTestRegion(TABLE_NAME, hcd); //write the data, but leave some in the memstore doPuts(region); @@ -122,10 +122,9 @@ public class TestEncodedSeekers { doGets(region); //verify correctness again after compacting - region.compactStores(); + region.compact(false); doGets(region); - Map encodingCounts = cache.getEncodingCountsForTest(); // Ensure that compactions don't pollute the cache with unencoded blocks @@ -138,7 +137,7 @@ public class TestEncodedSeekers { } - private void doPuts(HRegion region) throws IOException{ + private void doPuts(Region region) throws IOException{ LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(MIN_VALUE_SIZE, MAX_VALUE_SIZE); for (int i = 0; i < NUM_ROWS; ++i) { byte[] key = LoadTestKVGenerator.md5PrefixedKey(i).getBytes(); @@ -162,13 +161,13 @@ public class TestEncodedSeekers { region.put(put); } if (i % NUM_ROWS_PER_FLUSH == 0) { - region.flushcache(); + region.flush(true); } } } - private void doGets(HRegion region) throws IOException{ + private void doGets(Region region) throws IOException{ for (int i = 0; i < NUM_ROWS; ++i) { final byte[] rowKey = LoadTestKVGenerator.md5PrefixedKey(i).getBytes(); for (int j = 0; j < NUM_COLS_PER_ROW; ++j) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java index 80a50b02ab0..e31a73bb4ff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -65,7 +65,7 @@ public class TestPrefixTree { private final HBaseTestingUtility testUtil = new HBaseTestingUtility(); - private HRegion region; + private Region region; @Before public void setUp() throws Exception { @@ -86,21 +86,21 @@ public class TestPrefixTree { @Test public void testHBASE11728() throws Exception { 
Put put = new Put(Bytes.toBytes("a-b-0-0")); - put.add(fam, qual1, Bytes.toBytes("c1-value")); + put.addColumn(fam, qual1, Bytes.toBytes("c1-value")); region.put(put); put = new Put(row1_bytes); - put.add(fam, qual1, Bytes.toBytes("c1-value")); + put.addColumn(fam, qual1, Bytes.toBytes("c1-value")); region.put(put); put = new Put(row2_bytes); - put.add(fam, qual2, Bytes.toBytes("c2-value")); + put.addColumn(fam, qual2, Bytes.toBytes("c2-value")); region.put(put); put = new Put(row3_bytes); - put.add(fam, qual2, Bytes.toBytes("c2-value-2")); + put.addColumn(fam, qual2, Bytes.toBytes("c2-value-2")); region.put(put); put = new Put(row4_bytes); - put.add(fam, qual2, Bytes.toBytes("c2-value-3")); + put.addColumn(fam, qual2, Bytes.toBytes("c2-value-3")); region.put(put); - region.flushcache(true); + region.flush(true); String[] rows = new String[3]; rows[0] = row1; rows[1] = row2; @@ -135,9 +135,7 @@ public class TestPrefixTree { scan.setStopRow(Bytes.toBytes("a-b-A-1:")); scanner = region.getScanner(scan); for (int i = 1; i < 3; i++) { - // assertEquals(i < 2, scanner.next(cells)); - scanner.next(cells); - System.out.println(Result.create(cells)); + assertEquals(i < 2, scanner.next(cells)); CellScanner cellScanner = Result.create(cells).cellScanner(); while (cellScanner.advance()) { assertEquals(rows[i], Bytes.toString(cellScanner.current().getRowArray(), cellScanner @@ -176,13 +174,14 @@ public class TestPrefixTree { @Test public void testHBASE12817() throws IOException { for (int i = 0; i < 100; i++) { - region.put(new Put(Bytes.toBytes("obj" + (2900 + i))).add(fam, qual1, Bytes.toBytes(i))); + region + .put(new Put(Bytes.toBytes("obj" + (2900 + i))).addColumn(fam, qual1, Bytes.toBytes(i))); } - region.put(new Put(Bytes.toBytes("obj299")).add(fam, qual1, Bytes.toBytes("whatever"))); - region.put(new Put(Bytes.toBytes("obj29")).add(fam, qual1, Bytes.toBytes("whatever"))); - region.put(new Put(Bytes.toBytes("obj2")).add(fam, qual1, Bytes.toBytes("whatever"))); - region.put(new Put(Bytes.toBytes("obj3")).add(fam, qual1, Bytes.toBytes("whatever"))); - region.flushcache(true); + region.put(new Put(Bytes.toBytes("obj299")).addColumn(fam, qual1, Bytes.toBytes("whatever"))); + region.put(new Put(Bytes.toBytes("obj29")).addColumn(fam, qual1, Bytes.toBytes("whatever"))); + region.put(new Put(Bytes.toBytes("obj2")).addColumn(fam, qual1, Bytes.toBytes("whatever"))); + region.put(new Put(Bytes.toBytes("obj3")).addColumn(fam, qual1, Bytes.toBytes("whatever"))); + region.flush(true); Scan scan = new Scan(Bytes.toBytes("obj29995")); RegionScanner scanner = region.getScanner(scan); List cells = new ArrayList(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java index 5ef8cf0864d..b0a2ba29efc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CacheTestUtils.java @@ -247,11 +247,11 @@ public class CacheTestUtils { assertTrue(toBeTested.getStats().getEvictedCount() > 0); } - private static class ByteArrayCacheable implements Cacheable { + public static class ByteArrayCacheable implements Cacheable { - static final CacheableDeserializer blockDeserializer = + static final CacheableDeserializer blockDeserializer = new CacheableDeserializer() { - + @Override public Cacheable deserialize(ByteBuffer b) throws IOException { int len = b.getInt(); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java index c5fcc3c1126..ce78a37a80a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java @@ -283,9 +283,9 @@ public class TestCacheConfig { // TODO: Assert sizes allocated are right and proportions. LruBlockCache lbc = (LruBlockCache)cc.getBlockCache(); assertEquals(lruExpectedSize, lbc.getMaxSize()); - BucketCache bc = lbc.getVictimHandler(); + BlockCache bc = lbc.getVictimHandler(); // getMaxSize comes back in bytes but we specified size in MB - assertEquals(bcExpectedSize, bc.getMaxSize()); + assertEquals(bcExpectedSize, ((BucketCache) bc).getMaxSize()); // Test the L1+L2 deploy works as we'd expect with blocks evicted from L1 going to L2. long initialL1BlockCount = lbc.getBlockCount(); long initialL2BlockCount = bc.getBlockCount(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index b13c076806b..0622f55f008 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -20,8 +20,8 @@ package org.apache.hadoop.hbase.io.hfile; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; import java.io.IOException; @@ -29,7 +29,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.EnumMap; import java.util.List; -import java.util.Map; import java.util.Random; import org.apache.commons.logging.Log; @@ -37,7 +36,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -51,6 +49,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -59,6 +58,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ChecksumType; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.junit.After; +import org.junit.AfterClass; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -66,6 +66,8 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; +import com.google.common.collect.Lists; + /** * Tests {@link HFile} cache-on-write functionality for the following block * types: data blocks, non-root index blocks, and Bloom filter blocks. 
@@ -170,7 +172,7 @@ public class TestCacheOnWrite { this.blockCache = blockCache; testDescription = "[cacheOnWrite=" + cowType + ", compress=" + compress + ", encoderType=" + encoderType + ", cacheCompressedData=" + cacheCompressedData + "]"; - System.out.println(testDescription); + LOG.info(testDescription); } private static List getBlockCaches() throws IOException { @@ -185,10 +187,10 @@ public class TestCacheOnWrite { // bucket cache FileSystem.get(conf).mkdirs(TEST_UTIL.getDataTestDir()); - int[] bucketSizes = {INDEX_BLOCK_SIZE, DATA_BLOCK_SIZE, BLOOM_BLOCK_SIZE, 64 * 1024 }; + int[] bucketSizes = + { INDEX_BLOCK_SIZE, DATA_BLOCK_SIZE, BLOOM_BLOCK_SIZE, 64 * 1024, 128 * 1024 }; BlockCache bucketcache = - new BucketCache("file:" + TEST_UTIL.getDataTestDir() + "/bucket.data", - 128 * 1024 * 1024, 64 * 1024, bucketSizes, 5, 64 * 100, null); + new BucketCache("offheap", 128 * 1024 * 1024, 64 * 1024, bucketSizes, 5, 64 * 100, null); blockcaches.add(bucketcache); return blockcaches; } @@ -201,7 +203,8 @@ public class TestCacheOnWrite { for (Compression.Algorithm compress : HBaseTestingUtility.COMPRESSION_ALGORITHMS) { for (BlockEncoderTestType encoderType : BlockEncoderTestType.values()) { for (boolean cacheCompressedData : new boolean[] { false, true }) { - cowTypes.add(new Object[] { cowType, compress, encoderType, cacheCompressedData, blockache}); + cowTypes.add(new Object[] { cowType, compress, encoderType, cacheCompressedData, + blockache }); } } } @@ -210,47 +213,65 @@ public class TestCacheOnWrite { return cowTypes; } + private void clearBlockCache(BlockCache blockCache) throws InterruptedException { + if (blockCache instanceof LruBlockCache) { + ((LruBlockCache) blockCache).clearCache(); + } else { + // BucketCache may not return all cached blocks(blocks in write queue), so check it here. + for (int clearCount = 0; blockCache.getBlockCount() > 0; clearCount++) { + if (clearCount > 0) { + LOG.warn("clear block cache " + blockCache + " " + clearCount + " times, " + + blockCache.getBlockCount() + " blocks remaining"); + Thread.sleep(10); + } + for (CachedBlock block : Lists.newArrayList(blockCache)) { + BlockCacheKey key = new BlockCacheKey(block.getFilename(), block.getOffset()); + // CombinedBucketCache may need evict two times. 
+ for (int evictCount = 0; blockCache.evictBlock(key); evictCount++) { + if (evictCount > 1) { + LOG.warn("evict block " + block + " in " + blockCache + " " + evictCount + + " times, maybe a bug here"); + } + } + } + } + } + } + @Before public void setUp() throws IOException { conf = TEST_UTIL.getConfiguration(); this.conf.set("dfs.datanode.data.dir.perm", "700"); - conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION); conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE); conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZE); conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData); cowType.modifyConf(conf); fs = HFileSystem.get(conf); + CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = blockCache; cacheConf = new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA), cowType.shouldBeCached(BlockType.LEAF_INDEX), - cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData, true, false); + cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData, false, false); } @After - public void tearDown() { - cacheConf = new CacheConfig(conf); - blockCache = cacheConf.getBlockCache(); + public void tearDown() throws IOException, InterruptedException { + clearBlockCache(blockCache); } - @Test - public void testStoreFileCacheOnWrite() throws IOException { - testStoreFileCacheOnWriteInternals(false); - testStoreFileCacheOnWriteInternals(true); + @AfterClass + public static void afterClass() throws IOException { + TEST_UTIL.cleanupTestDir(); } - protected void testStoreFileCacheOnWriteInternals(boolean useTags) throws IOException { + private void testStoreFileCacheOnWriteInternals(boolean useTags) throws IOException { writeStoreFile(useTags); readStoreFile(useTags); } private void readStoreFile(boolean useTags) throws IOException { - AbstractHFileReader reader; - if (useTags) { - reader = (HFileReaderV3) HFile.createReader(fs, storeFilePath, cacheConf, conf); - } else { - reader = (HFileReaderV2) HFile.createReader(fs, storeFilePath, cacheConf, conf); - } + HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, conf); LOG.info("HFile information: " + reader); HFileContext meta = new HFileContextBuilder().withCompression(compress) .withBytesPerCheckSum(CKBYTES).withChecksumType(ChecksumType.NULL) @@ -323,15 +344,15 @@ public class TestCacheOnWrite { encoderType.encode ? BlockType.ENCODED_DATA : BlockType.DATA; if (useTags) { assertEquals("{" + cachedDataBlockType - + "=1550, LEAF_INDEX=173, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=20}", countByType); + + "=2663, LEAF_INDEX=297, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=34}", countByType); } else { assertEquals("{" + cachedDataBlockType - + "=1379, LEAF_INDEX=154, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=18}", countByType); + + "=2498, LEAF_INDEX=278, BLOOM_CHUNK=9, INTERMEDIATE_INDEX=31}", countByType); } // iterate all the keyvalue from hfile while (scanner.next()) { - Cell cell = scanner.getKeyValue(); + scanner.getKeyValue(); } reader.close(); } @@ -341,23 +362,16 @@ public class TestCacheOnWrite { // Let's make half of KVs puts. return KeyValue.Type.Put; } else { - KeyValue.Type keyType = - KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)]; - if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) - { - throw new RuntimeException("Generated an invalid key type: " + keyType - + ". 
" + "Probably the layout of KeyValue.Type has changed."); + KeyValue.Type keyType = KeyValue.Type.values()[1 + rand.nextInt(NUM_VALID_KEY_TYPES)]; + if (keyType == KeyValue.Type.Minimum || keyType == KeyValue.Type.Maximum) { + throw new RuntimeException("Generated an invalid key type: " + keyType + ". " + + "Probably the layout of KeyValue.Type has changed."); } return keyType; } } - public void writeStoreFile(boolean useTags) throws IOException { - if(useTags) { - TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); - } else { - TEST_UTIL.getConfiguration().setInt("hfile.format.version", 2); - } + private void writeStoreFile(boolean useTags) throws IOException { Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(), "test_cache_on_write"); HFileContext meta = new HFileContextBuilder().withCompression(compress) @@ -368,12 +382,11 @@ public class TestCacheOnWrite { .withOutputDir(storeFileParentDir).withComparator(KeyValue.COMPARATOR) .withFileContext(meta) .withBloomType(BLOOM_TYPE).withMaxKeyCount(NUM_KV).build(); - - final int rowLen = 32; + byte[] cf = Bytes.toBytes("fam"); for (int i = 0; i < NUM_KV; ++i) { - byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i); - byte[] v = TestHFileWriterV2.randomValue(rand); - int cfLen = rand.nextInt(k.length - rowLen + 1); + byte[] row = TestHFileWriterV2.randomOrderedKey(rand, i); + byte[] qualifier = TestHFileWriterV2.randomRowOrQualifier(rand); + byte[] value = TestHFileWriterV2.randomValue(rand); KeyValue kv; if(useTags) { Tag t = new Tag((byte) 1, "visibility"); @@ -381,21 +394,13 @@ public class TestCacheOnWrite { tagList.add(t); Tag[] tags = new Tag[1]; tags[0] = t; - kv = new KeyValue( - k, 0, rowLen, - k, rowLen, cfLen, - k, rowLen + cfLen, k.length - rowLen - cfLen, - rand.nextLong(), - generateKeyType(rand), - v, 0, v.length, tagList); + kv = + new KeyValue(row, 0, row.length, cf, 0, cf.length, qualifier, 0, qualifier.length, + rand.nextLong(), generateKeyType(rand), value, 0, value.length, tagList); } else { - kv = new KeyValue( - k, 0, rowLen, - k, rowLen, cfLen, - k, rowLen + cfLen, k.length - rowLen - cfLen, - rand.nextLong(), - generateKeyType(rand), - v, 0, v.length); + kv = + new KeyValue(row, 0, row.length, cf, 0, cf.length, qualifier, 0, qualifier.length, + rand.nextLong(), generateKeyType(rand), value, 0, value.length); } sfw.append(kv); } @@ -404,18 +409,8 @@ public class TestCacheOnWrite { storeFilePath = sfw.getPath(); } - @Test - public void testNotCachingDataBlocksDuringCompaction() throws IOException { - testNotCachingDataBlocksDuringCompactionInternals(false); - testNotCachingDataBlocksDuringCompactionInternals(true); - } - - protected void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags) throws IOException { - if (useTags) { - TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); - } else { - TEST_UTIL.getConfiguration().setInt("hfile.format.version", 2); - } + private void testNotCachingDataBlocksDuringCompactionInternals(boolean useTags) + throws IOException, InterruptedException { // TODO: need to change this test if we add a cache size threshold for // compactions, or if we implement some other kind of intelligent logic for // deciding what blocks to cache-on-write on compaction. 
@@ -423,7 +418,7 @@ public class TestCacheOnWrite { final String cf = "myCF"; final byte[] cfBytes = Bytes.toBytes(cf); final int maxVersions = 3; - HRegion region = TEST_UTIL.createTestRegion(table, + Region region = TEST_UTIL.createTestRegion(table, new HColumnDescriptor(cf) .setCompressionType(compress) .setBloomFilterType(BLOOM_TYPE) @@ -450,29 +445,36 @@ public class TestCacheOnWrite { HConstants.LATEST_TIMESTAMP, Bytes.toBytes(valueStr), tags); p.add(kv); } else { - p.add(cfBytes, Bytes.toBytes(qualStr), ts++, Bytes.toBytes(valueStr)); + p.addColumn(cfBytes, Bytes.toBytes(qualStr), ts++, Bytes.toBytes(valueStr)); } } } p.setDurability(Durability.ASYNC_WAL); region.put(p); } - region.flushcache(); + region.flush(true); } - LruBlockCache blockCache = - (LruBlockCache) new CacheConfig(conf).getBlockCache(); - blockCache.clearCache(); - assertEquals(0, blockCache.getBlockTypeCountsForTest().size()); - region.compactStores(); + clearBlockCache(blockCache); + assertEquals(0, blockCache.getBlockCount()); + region.compact(false); LOG.debug("compactStores() returned"); - Map blockTypesInCache = - blockCache.getBlockTypeCountsForTest(); - LOG.debug("Block types in cache: " + blockTypesInCache); - assertNull(blockTypesInCache.get(BlockType.ENCODED_DATA)); - assertNull(blockTypesInCache.get(BlockType.DATA)); - region.close(); - blockCache.shutdown(); + for (CachedBlock block: blockCache) { + assertNotEquals(BlockType.ENCODED_DATA, block.getBlockType()); + assertNotEquals(BlockType.DATA, block.getBlockType()); + } + ((HRegion)region).close(); + } + + @Test + public void testStoreFileCacheOnWrite() throws IOException { + testStoreFileCacheOnWriteInternals(false); + testStoreFileCacheOnWriteInternals(true); + } + + @Test + public void testNotCachingDataBlocksDuringCompaction() throws IOException, InterruptedException { + testNotCachingDataBlocksDuringCompactionInternals(false); + testNotCachingDataBlocksDuringCompactionInternals(true); } } - diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java index 1b6731a5d5f..97784cba16e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java @@ -55,7 +55,7 @@ public class TestFixedFileTrailer { private static final int MAX_COMPARATOR_NAME_LENGTH = 128; /** - * The number of used fields by version. Indexed by version minus two. + * The number of used fields by version. Indexed by version minus two. * Min version that we support is V2 */ private static final int[] NUM_FIELDS_BY_VERSION = new int[] { 14, 15 }; @@ -89,8 +89,8 @@ public class TestFixedFileTrailer { @Test public void testTrailer() throws IOException { - FixedFileTrailer t = new FixedFileTrailer(version, - HFileReaderV2.PBUF_TRAILER_MINOR_VERSION); + FixedFileTrailer t = new FixedFileTrailer(version, + HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION); t.setDataIndexCount(3); t.setEntryCount(((long) Integer.MAX_VALUE) + 1); @@ -122,8 +122,8 @@ public class TestFixedFileTrailer { // Finished writing, trying to read. 
{ DataInputStream dis = new DataInputStream(bais); - FixedFileTrailer t2 = new FixedFileTrailer(version, - HFileReaderV2.PBUF_TRAILER_MINOR_VERSION); + FixedFileTrailer t2 = new FixedFileTrailer(version, + HFileReaderImpl.PBUF_TRAILER_MINOR_VERSION); t2.deserialize(dis); assertEquals(-1, bais.read()); // Ensure we have read everything. checkLoadedTrailer(version, t, t2); @@ -167,12 +167,12 @@ public class TestFixedFileTrailer { trailerStr.split(", ").length); assertEquals(trailerStr, t4.toString()); } - + @Test public void testTrailerForV2NonPBCompatibility() throws Exception { if (version == 2) { FixedFileTrailer t = new FixedFileTrailer(version, - HFileReaderV2.MINOR_VERSION_NO_CHECKSUM); + HFileReaderImpl.MINOR_VERSION_NO_CHECKSUM); t.setDataIndexCount(3); t.setEntryCount(((long) Integer.MAX_VALUE) + 1); t.setLastDataBlockOffset(291); @@ -199,7 +199,7 @@ public class TestFixedFileTrailer { { DataInputStream dis = new DataInputStream(bais); FixedFileTrailer t2 = new FixedFileTrailer(version, - HFileReaderV2.MINOR_VERSION_NO_CHECKSUM); + HFileReaderImpl.MINOR_VERSION_NO_CHECKSUM); t2.deserialize(dis); assertEquals(-1, bais.read()); // Ensure we have read everything. checkLoadedTrailer(version, t, t2); @@ -228,7 +228,7 @@ public class TestFixedFileTrailer { output.writeInt(FixedFileTrailer.materializeVersion(fft.getMajorVersion(), fft.getMinorVersion())); } - + private FixedFileTrailer readTrailer(Path trailerPath) throws IOException { FSDataInputStream fsdis = fs.open(trailerPath); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java index 2af3a6eaeef..cf2aca57eaa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Before; import org.junit.Test; @@ -82,8 +82,6 @@ public class TestForceCacheImportantBlocks { public static Collection parameters() { // HFile versions return Arrays.asList( - new Object[] { 2, true }, - new Object[] { 2, false }, new Object[] { 3, true }, new Object[] { 3, false } ); @@ -111,7 +109,7 @@ public class TestForceCacheImportantBlocks { setBloomFilterType(BLOOM_TYPE); hcd.setBlocksize(BLOCK_SIZE); hcd.setBlockCacheEnabled(cfCacheEnabled); - HRegion region = TEST_UTIL.createTestRegion(TABLE, hcd); + Region region = TEST_UTIL.createTestRegion(TABLE, hcd); BlockCache cache = region.getStore(hcd.getName()).getCacheConfig().getBlockCache(); CacheStats stats = cache.getStats(); writeTestData(region); @@ -128,7 +126,7 @@ public class TestForceCacheImportantBlocks { else assertTrue(stats.getMissCount() > missCount); } - private void writeTestData(HRegion region) throws IOException { + private void writeTestData(Region region) throws IOException { for (int i = 0; i < NUM_ROWS; ++i) { Put put = new Put(Bytes.toBytes("row" + i)); for (int j = 0; j < NUM_COLS_PER_ROW; ++j) { @@ -139,7 +137,7 @@ public class TestForceCacheImportantBlocks { } region.put(put); if ((i + 1) % 
ROWS_PER_HFILE == 0) { - region.flushcache(); + region.flush(true); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java index 3855629781f..9e4b1c7fb1f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java @@ -246,7 +246,7 @@ public class TestHFile extends HBaseTestCase { FSDataOutputStream fout = createFSOutput(ncTFile); HFileContext meta = new HFileContextBuilder() .withBlockSize(minBlockSize) - .withCompression(AbstractHFileWriter.compressionByName(codec)) + .withCompression(HFileWriterImpl.compressionByName(codec)) .build(); Writer writer = HFile.getWriterFactory(conf, cacheConf) .withOutputStream(fout) @@ -339,7 +339,7 @@ public class TestHFile extends HBaseTestCase { Path mFile = new Path(ROOT_DIR, "meta.hfile"); FSDataOutputStream fout = createFSOutput(mFile); HFileContext meta = new HFileContextBuilder() - .withCompression(AbstractHFileWriter.compressionByName(compress)) + .withCompression(HFileWriterImpl.compressionByName(compress)) .withBlockSize(minBlockSize).build(); Writer writer = HFile.getWriterFactory(conf, cacheConf) .withOutputStream(fout) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java index 939c0191a1e..0ee9d1495a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java @@ -590,7 +590,7 @@ public class TestHFileBlockIndex { } // Manually compute the mid-key and validate it. 
- HFileReaderV2 reader2 = (HFileReaderV2) reader; + HFile.Reader reader2 = reader; HFileBlock.FSReader fsReader = reader2.getUncachedBlockReader(); HFileBlock.BlockIterator iter = fsReader.blockRange(0, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java index 0cb3c3cfdf2..2d821ae48bc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java @@ -70,7 +70,9 @@ public class TestHFileEncryption { fs = FileSystem.get(conf); cryptoContext = Encryption.newContext(conf); - Cipher aes = Encryption.getCipher(conf, "AES"); + String algorithm = + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + Cipher aes = Encryption.getCipher(conf, algorithm); assertNotNull(aes); cryptoContext.setCipher(aes); byte[] key = new byte[aes.getKeyLength()]; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java index c0683f8b2a3..ab811f1db11 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java @@ -55,8 +55,7 @@ public class TestHFileInlineToRootChunkConversion { CacheConfig cacheConf = new CacheConfig(conf); conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize); HFileContext context = new HFileContextBuilder().withBlockSize(16).build(); - HFileWriterV2 hfw = - (HFileWriterV2) new HFileWriterV2.WriterFactoryV2(conf, cacheConf) + HFile.Writer hfw = new HFileWriterFactory(conf, cacheConf) .withFileContext(context) .withPath(fs, hfPath).create(); List keys = new ArrayList(); @@ -78,7 +77,7 @@ public class TestHFileInlineToRootChunkConversion { } hfw.close(); - HFileReaderV2 reader = (HFileReaderV2) HFile.createReader(fs, hfPath, cacheConf, conf); + HFile.Reader reader = HFile.createReader(fs, hfPath, cacheConf, conf); // Scanner doesn't do Cells yet. Fix. 
HFileScanner scanner = reader.getScanner(true, true); for (int i = 0; i < keys.size(); ++i) { @@ -86,4 +85,4 @@ public class TestHFileInlineToRootChunkConversion { } reader.close(); } -} +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java index 76a8200b82e..26adb492d2a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java @@ -130,7 +130,7 @@ public class TestHFileSeek extends TestCase { try { HFileContext context = new HFileContextBuilder() .withBlockSize(options.minBlockSize) - .withCompression(AbstractHFileWriter.compressionByName(options.compress)) + .withCompression(HFileWriterImpl.compressionByName(options.compress)) .build(); Writer writer = HFile.getWriterFactoryNoCache(conf) .withOutputStream(fout) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java index 42e918a1523..ca063bc1577 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV2.java @@ -56,7 +56,7 @@ import org.junit.experimental.categories.Category; /** * Testing writing a version 2 {@link HFile}. This is a low-level test written - * during the development of {@link HFileWriterV2}. + * during the development of {@link HFileWriterImpl}. */ @Category({IOTests.class, SmallTests.class}) public class TestHFileWriterV2 { @@ -99,8 +99,7 @@ public class TestHFileWriterV2 { .withBlockSize(4096) .withCompression(compressAlgo) .build(); - HFileWriterV2 writer = (HFileWriterV2) - new HFileWriterV2.WriterFactoryV2(conf, new CacheConfig(conf)) + HFile.Writer writer = new HFileWriterFactory(conf, new CacheConfig(conf)) .withPath(fs, hfilePath) .withFileContext(context) .create(); @@ -136,7 +135,6 @@ public class TestHFileWriterV2 { FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis, fileSize); - assertEquals(2, trailer.getMajorVersion()); assertEquals(entryCount, trailer.getEntryCount()); HFileContext meta = new HFileContextBuilder() @@ -177,8 +175,7 @@ public class TestHFileWriterV2 { // File info FileInfo fileInfo = new FileInfo(); fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream()); - byte [] keyValueFormatVersion = fileInfo.get( - HFileWriterV2.KEY_VALUE_VERSION); + byte [] keyValueFormatVersion = fileInfo.get(HFileWriterImpl.KEY_VALUE_VERSION); boolean includeMemstoreTS = keyValueFormatVersion != null && Bytes.toInt(keyValueFormatVersion) > 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java index f96e8ef757d..2ca92731e49 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java @@ -60,8 +60,7 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; /** - * Testing writing a version 3 {@link HFile}. This is a low-level test written - * during the development of {@link HFileWriterV3}. + * Testing writing a version 3 {@link HFile}. 
*/ @RunWith(Parameterized.class) @Category({IOTests.class, SmallTests.class}) @@ -120,8 +119,7 @@ public class TestHFileWriterV3 { .withBlockSize(4096) .withIncludesTags(useTags) .withCompression(compressAlgo).build(); - HFileWriterV3 writer = (HFileWriterV3) - new HFileWriterV3.WriterFactoryV3(conf, new CacheConfig(conf)) + HFile.Writer writer = new HFileWriterFactory(conf, new CacheConfig(conf)) .withPath(fs, hfilePath) .withFileContext(context) .withComparator(KeyValue.COMPARATOR) @@ -206,8 +204,7 @@ public class TestHFileWriterV3 { // File info FileInfo fileInfo = new FileInfo(); fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream()); - byte [] keyValueFormatVersion = fileInfo.get( - HFileWriterV3.KEY_VALUE_VERSION); + byte [] keyValueFormatVersion = fileInfo.get(HFileWriterImpl.KEY_VALUE_VERSION); boolean includeMemstoreTS = keyValueFormatVersion != null && Bytes.toInt(keyValueFormatVersion) > 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java index 2fd36847b37..00674179ff9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java @@ -89,8 +89,7 @@ public class TestLazyDataBlockDecompression { */ private static void writeHFile(Configuration conf, CacheConfig cc, FileSystem fs, Path path, HFileContext cxt, int entryCount) throws IOException { - HFileWriterV2 writer = (HFileWriterV2) - new HFileWriterV2.WriterFactoryV2(conf, cc) + HFile.Writer writer = new HFileWriterFactory(conf, cc) .withPath(fs, path) .withFileContext(cxt) .create(); @@ -118,7 +117,7 @@ public class TestLazyDataBlockDecompression { long fileSize = fs.getFileStatus(path).getLen(); FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize); - HFileReaderV2 reader = new HFileReaderV2(path, trailer, fsdis, fileSize, cacheConfig, + HFile.Reader reader = new HFileReaderImpl(path, trailer, fsdis, fileSize, cacheConfig, fsdis.getHfs(), conf); reader.loadFileInfo(); long offset = trailer.getFirstDataBlockOffset(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java index 4ceafb44898..6a126160528 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java @@ -55,7 +55,6 @@ public class TestPrefetch { @Before public void setUp() throws IOException { conf = TEST_UTIL.getConfiguration(); - conf.setInt(HFile.FORMAT_VERSION_KEY, 3); conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true); fs = HFileSystem.get(conf); CacheConfig.blockCacheDisabled = false; @@ -70,10 +69,9 @@ public class TestPrefetch { private void readStoreFile(Path storeFilePath) throws Exception { // Open the file - HFileReaderV2 reader = (HFileReaderV2) HFile.createReader(fs, - storeFilePath, cacheConf, conf); + HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConf, conf); - while (!((HFileReaderV3)reader).prefetchComplete()) { + while (!reader.prefetchComplete()) { // Sleep for a bit Thread.sleep(1000); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java index 3a0fdf78531..9d7de02bb2f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java @@ -37,7 +37,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; /** - * Test {@link HFileScanner#reseekTo(byte[])} + * Test {@link HFileScanner#reseekTo(org.apache.hadoop.hbase.Cell)} */ @Category({IOTests.class, SmallTests.class}) public class TestReseekTo { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java index e8f6c1beae1..7584cf23cb4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java @@ -32,14 +32,14 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.testclassification.IOTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.testclassification.IOTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.AfterClass; import org.junit.Test; @@ -99,7 +99,7 @@ public class TestScannerSelectionUsingKeyRange { HTableDescriptor htd = new HTableDescriptor(TABLE); htd.addFamily(hcd); HRegionInfo info = new HRegionInfo(TABLE); - HRegion region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), conf, + Region region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), conf, htd); for (int iFile = 0; iFile < NUM_FILES; ++iFile) { @@ -111,7 +111,7 @@ public class TestScannerSelectionUsingKeyRange { } region.put(put); } - region.flushcache(); + region.flush(true); } Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java index 1c426e4ad45..d5f4bcd202c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java @@ -28,16 +28,16 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Put; import 
org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.InternalScanner; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -106,10 +106,8 @@ public class TestScannerSelectionUsingTTL { HTableDescriptor htd = new HTableDescriptor(TABLE); htd.addFamily(hcd); HRegionInfo info = new HRegionInfo(TABLE); - HRegion region = - HBaseTestingUtility.createRegionAndWAL(info, - TEST_UTIL.getDataTestDir(info.getEncodedName()), - conf, htd); + Region region = HBaseTestingUtility.createRegionAndWAL(info, + TEST_UTIL.getDataTestDir(info.getEncodedName()), conf, htd); long ts = EnvironmentEdgeManager.currentTime(); long version = 0; //make sure each new set of Put's have a new ts @@ -127,7 +125,7 @@ public class TestScannerSelectionUsingTTL { } region.put(put); } - region.flushcache(); + region.flush(true); version++; } @@ -155,7 +153,7 @@ public class TestScannerSelectionUsingTTL { HStore store = (HStore)region.getStore(FAMILY_BYTES); store.compactRecentForTestingAssumingDefaultPolicy(totalNumFiles); } else { - region.compactStores(); + region.compact(false); } HBaseTestingUtility.closeRegionAndWAL(region); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java index b9a126f8441..69bc09d2fd9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java @@ -73,11 +73,6 @@ public class TestSeekTo extends HBaseTestCase { Path makeNewFile(TagUsage tagUsage) throws IOException { Path ncTFile = new Path(testDir, "basic.hfile"); - if (tagUsage != TagUsage.NO_TAG) { - conf.setInt("hfile.format.version", 3); - } else { - conf.setInt("hfile.format.version", 2); - } FSDataOutputStream fout = this.fs.create(ncTFile); int blocksize = toKV("a", tagUsage).getLength() * 3; HFileContext context = new HFileContextBuilder().withBlockSize(blocksize) @@ -142,7 +137,7 @@ public class TestSeekTo extends HBaseTestCase { @Test public void testSeekBeforeWithReSeekTo() throws Exception { - testSeekBeforeWithReSeekToInternals(TagUsage.NO_TAG); + testSeekBeforeInternals(TagUsage.NO_TAG); testSeekBeforeWithReSeekToInternals(TagUsage.ONLY_TAG); testSeekBeforeWithReSeekToInternals(TagUsage.PARTIAL_TAG); } @@ -232,7 +227,7 @@ public class TestSeekTo extends HBaseTestCase { @Test public void testSeekTo() throws Exception { - testSeekToInternals(TagUsage.NO_TAG); + testSeekBeforeInternals(TagUsage.NO_TAG); testSeekToInternals(TagUsage.ONLY_TAG); testSeekToInternals(TagUsage.PARTIAL_TAG); } @@ -262,7 +257,7 @@ public class TestSeekTo extends HBaseTestCase { @Test public void testBlockContainingKey() throws Exception { - testBlockContainingKeyInternals(TagUsage.NO_TAG); + testSeekBeforeInternals(TagUsage.NO_TAG); testBlockContainingKeyInternals(TagUsage.ONLY_TAG); testBlockContainingKeyInternals(TagUsage.PARTIAL_TAG); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java index d29be01cdcb..99f5657c883 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.io.hfile.bucket; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.FileNotFoundException; import java.io.IOException; @@ -27,13 +28,14 @@ import java.util.Arrays; import java.util.List; import java.util.Random; -import org.apache.hadoop.hbase.testclassification.IOTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; import org.apache.hadoop.hbase.io.hfile.CacheTestUtils; import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.BucketSizeInfo; import org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.IndexStatistics; +import org.apache.hadoop.hbase.testclassification.IOTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.IdLock; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -44,24 +46,23 @@ import org.junit.runners.Parameterized; /** * Basic test of BucketCache.Puts and gets. *
<p>
      - * Tests will ensure that blocks' data correctness under several threads - * concurrency + * Tests will ensure that blocks' data correctness under several threads concurrency */ @RunWith(Parameterized.class) -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestBucketCache { private static final Random RAND = new Random(); - @Parameterized.Parameters(name="{index}: blockSize={0}, bucketSizes={1}") + @Parameterized.Parameters(name = "{index}: blockSize={0}, bucketSizes={1}") public static Iterable data() { return Arrays.asList(new Object[][] { - { 8192, null }, // TODO: why is 8k the default blocksize for these tests? - { 16 * 1024, new int[] { - 2 * 1024 + 1024, 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024, - 28 * 1024 + 1024, 32 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, - 128 * 1024 + 1024 } } - }); + { 8192, null }, // TODO: why is 8k the default blocksize for these tests? + { + 16 * 1024, + new int[] { 2 * 1024 + 1024, 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024, + 28 * 1024 + 1024, 32 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, + 128 * 1024 + 1024 } } }); } @Parameterized.Parameter(0) @@ -76,7 +77,7 @@ public class TestBucketCache { final int BLOCK_SIZE = CACHE_SIZE / NUM_BLOCKS; final int NUM_THREADS = 1000; final int NUM_QUERIES = 10000; - + final long capacitySize = 32 * 1024 * 1024; final int writeThreads = BucketCache.DEFAULT_WRITER_THREADS; final int writerQLen = BucketCache.DEFAULT_WRITER_QUEUE_ITEMS; @@ -86,16 +87,16 @@ public class TestBucketCache { private class MockedBucketCache extends BucketCache { public MockedBucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes, - int writerThreads, int writerQLen, String persistencePath) - throws FileNotFoundException, IOException { + int writerThreads, int writerQLen, String persistencePath) throws FileNotFoundException, + IOException { super(ioEngineName, capacity, blockSize, bucketSizes, writerThreads, writerQLen, - persistencePath); + persistencePath); super.wait_when_cache = true; } @Override - public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, - boolean inMemory, boolean cacheDataInL1) { + public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory, + boolean cacheDataInL1) { if (super.getBlock(cacheKey, true, false, true) != null) { throw new RuntimeException("Cached an already cached block"); } @@ -113,8 +114,9 @@ public class TestBucketCache { @Before public void setup() throws FileNotFoundException, IOException { - cache = new MockedBucketCache(ioEngineName, capacitySize, constructedBlockSize, - constructedBlockSizes, writeThreads, writerQLen, persistencePath); + cache = + new MockedBucketCache(ioEngineName, capacitySize, constructedBlockSize, + constructedBlockSizes, writeThreads, writerQLen, persistencePath); } @After @@ -142,7 +144,7 @@ public class TestBucketCache { // Fill the allocated extents by choosing a random blocksize. Continues selecting blocks until // the cache is completely filled. 
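The testMemoryLeak case and the cacheAndWaitUntilFlushedToBucket helper added further down in this file's diff depend on BucketCache.cacheBlock being asynchronous: the call only queues the block, and the writer threads later publish the entry in backingMap. Below is a minimal sketch of a bounded variant of that wait, using plain java.util.concurrent types as stand-ins for backingMap and BlockCacheKey; the names are illustrative, not HBase APIs.

// A bounded variant of the unbounded while/sleep wait used by cacheAndWaitUntilFlushedToBucket.
// ConcurrentMap and String stand in for BucketCache.backingMap and BlockCacheKey; names are illustrative.
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeoutException;

public final class AsyncCacheWait {
  private AsyncCacheWait() {
  }

  /** Polls until the key shows up in the backing map or the timeout elapses. */
  public static <K, V> V waitForEntry(ConcurrentMap<K, V> backingMap, K key, long timeoutMs)
      throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    V value;
    while ((value = backingMap.get(key)) == null) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("block was not flushed to the bucket within " + timeoutMs + "ms");
      }
      Thread.sleep(50);
    }
    return value;
  }

  public static void main(String[] args) throws Exception {
    ConcurrentMap<String, Integer> backingMap = new ConcurrentHashMap<>();
    new Thread(() -> {
      try {
        Thread.sleep(200);
      } catch (InterruptedException ignored) {
      }
      backingMap.put("dummy", 42); // simulates the writer thread publishing the entry
    }).start();
    System.out.println("entry = " + waitForEntry(backingMap, "dummy", 5000));
  }
}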
List tmp = new ArrayList(BLOCKSIZES); - for (int i = 0; !full; i++) { + while (!full) { Integer blockSize = null; try { blockSize = randFrom(tmp); @@ -156,9 +158,7 @@ public class TestBucketCache { for (Integer blockSize : BLOCKSIZES) { BucketSizeInfo bucketSizeInfo = mAllocator.roundUpToBucketSizeInfo(blockSize); IndexStatistics indexStatistics = bucketSizeInfo.statistics(); - assertEquals( - "unexpected freeCount for " + bucketSizeInfo, - 0, indexStatistics.freeCount()); + assertEquals("unexpected freeCount for " + bucketSizeInfo, 0, indexStatistics.freeCount()); } for (long offset : allocations) { @@ -182,4 +182,41 @@ public class TestBucketCache { cache.stopWriterThreads(); CacheTestUtils.testHeapSizeChanges(cache, BLOCK_SIZE); } -} \ No newline at end of file + + // BucketCache.cacheBlock is async, it first adds block to ramCache and writeQueue, then writer + // threads will flush it to the bucket and put reference entry in backingMap. + private void cacheAndWaitUntilFlushedToBucket(BucketCache cache, BlockCacheKey cacheKey, + Cacheable block) throws InterruptedException { + cache.cacheBlock(cacheKey, block); + while (!cache.backingMap.containsKey(cacheKey)) { + Thread.sleep(100); + } + } + + @Test + public void testMemoryLeak() throws Exception { + final BlockCacheKey cacheKey = new BlockCacheKey("dummy", 1L); + cacheAndWaitUntilFlushedToBucket(cache, cacheKey, new CacheTestUtils.ByteArrayCacheable( + new byte[10])); + long lockId = cache.backingMap.get(cacheKey).offset(); + IdLock.Entry lockEntry = cache.offsetLock.getLockEntry(lockId); + Thread evictThread = new Thread("evict-block") { + + @Override + public void run() { + cache.evictBlock(cacheKey); + } + + }; + evictThread.start(); + cache.offsetLock.waitForWaiters(lockId, 1); + cache.blockEvicted(cacheKey, cache.backingMap.remove(cacheKey), true); + cacheAndWaitUntilFlushedToBucket(cache, cacheKey, new CacheTestUtils.ByteArrayCacheable( + new byte[10])); + cache.offsetLock.releaseLockEntry(lockEntry); + evictThread.join(); + assertEquals(1L, cache.getBlockCount()); + assertTrue(cache.getCurrentSize() > 0L); + assertTrue("We should have a block!", cache.iterator().hasNext()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java new file mode 100644 index 00000000000..32eb9f6d61a --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java @@ -0,0 +1,269 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.ipc; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.internal.verification.VerificationModeFactory.times; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoRequestProto; +import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoResponseProto; +import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EmptyRequestProto; +import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EmptyResponseProto; +import org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos; +import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.io.compress.GzipCodec; +import org.apache.hadoop.util.StringUtils; +import org.junit.Test; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; +import com.google.protobuf.BlockingService; +import com.google.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.Message; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; + +/** + * Some basic ipc tests. + */ +public abstract class AbstractTestIPC { + + private static final Log LOG = LogFactory.getLog(AbstractTestIPC.class); + + private static byte[] CELL_BYTES = Bytes.toBytes("xyz"); + private static KeyValue CELL = new KeyValue(CELL_BYTES, CELL_BYTES, CELL_BYTES, CELL_BYTES); + static byte[] BIG_CELL_BYTES = new byte[10 * 1024]; + static KeyValue BIG_CELL = new KeyValue(CELL_BYTES, CELL_BYTES, CELL_BYTES, BIG_CELL_BYTES); + static final Configuration CONF = HBaseConfiguration.create(); + // We are using the test TestRpcServiceProtos generated classes and Service because they are + // available and basic with methods like 'echo', and ping. Below we make a blocking service + // by passing in implementation of blocking interface. We use this service in all tests that + // follow. 
+ static final BlockingService SERVICE = + TestRpcServiceProtos.TestProtobufRpcProto + .newReflectiveBlockingService(new TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface() { + + @Override + public EmptyResponseProto ping(RpcController controller, EmptyRequestProto request) + throws ServiceException { + return null; + } + + @Override + public EmptyResponseProto error(RpcController controller, EmptyRequestProto request) + throws ServiceException { + return null; + } + + @Override + public EchoResponseProto echo(RpcController controller, EchoRequestProto request) + throws ServiceException { + if (controller instanceof PayloadCarryingRpcController) { + PayloadCarryingRpcController pcrc = (PayloadCarryingRpcController) controller; + // If cells, scan them to check we are able to iterate what we were given and since + // this is + // an echo, just put them back on the controller creating a new block. Tests our + // block + // building. + CellScanner cellScanner = pcrc.cellScanner(); + List list = null; + if (cellScanner != null) { + list = new ArrayList(); + try { + while (cellScanner.advance()) { + list.add(cellScanner.current()); + } + } catch (IOException e) { + throw new ServiceException(e); + } + } + cellScanner = CellUtil.createCellScanner(list); + ((PayloadCarryingRpcController) controller).setCellScanner(cellScanner); + } + return EchoResponseProto.newBuilder().setMessage(request.getMessage()).build(); + } + }); + + /** + * Instance of server. We actually don't do anything speical in here so could just use + * HBaseRpcServer directly. + */ + static class TestRpcServer extends RpcServer { + + TestRpcServer() throws IOException { + this(new FifoRpcScheduler(CONF, 1)); + } + + TestRpcServer(RpcScheduler scheduler) throws IOException { + super(null, "testRpcServer", Lists + .newArrayList(new BlockingServiceAndInterface(SERVICE, null)), new InetSocketAddress( + "localhost", 0), CONF, scheduler); + } + + @Override + public Pair call(BlockingService service, MethodDescriptor md, + Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status) + throws IOException { + return super.call(service, md, param, cellScanner, receiveTime, status); + } + } + + protected abstract AbstractRpcClient createRpcClientNoCodec(Configuration conf); + + /** + * Ensure we do not HAVE TO HAVE a codec. + * @throws InterruptedException + * @throws IOException + */ + @Test + public void testNoCodec() throws InterruptedException, IOException { + Configuration conf = HBaseConfiguration.create(); + AbstractRpcClient client = createRpcClientNoCodec(conf); + TestRpcServer rpcServer = new TestRpcServer(); + try { + rpcServer.start(); + InetSocketAddress address = rpcServer.getListenerAddress(); + MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); + final String message = "hello"; + EchoRequestProto param = EchoRequestProto.newBuilder().setMessage(message).build(); + Pair r = + client.call(null, md, param, md.getOutputType().toProto(), User.getCurrent(), address); + assertTrue(r.getSecond() == null); + // Silly assertion that the message is in the returned pb. + assertTrue(r.getFirst().toString().contains(message)); + } finally { + client.close(); + rpcServer.stop(); + } + } + + protected abstract AbstractRpcClient createRpcClient(Configuration conf); + + /** + * It is hard to verify the compression is actually happening under the wraps. 
Hope that if + * unsupported, we'll get an exception out of some time (meantime, have to trace it manually to + * confirm that compression is happening down in the client and server). + * @throws IOException + * @throws InterruptedException + * @throws SecurityException + * @throws NoSuchMethodException + */ + @Test + public void testCompressCellBlock() throws IOException, InterruptedException, SecurityException, + NoSuchMethodException, ServiceException { + Configuration conf = new Configuration(HBaseConfiguration.create()); + conf.set("hbase.client.rpc.compressor", GzipCodec.class.getCanonicalName()); + List cells = new ArrayList(); + int count = 3; + for (int i = 0; i < count; i++) { + cells.add(CELL); + } + AbstractRpcClient client = createRpcClient(conf); + TestRpcServer rpcServer = new TestRpcServer(); + try { + rpcServer.start(); + InetSocketAddress address = rpcServer.getListenerAddress(); + MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); + EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build(); + + PayloadCarryingRpcController pcrc = + new PayloadCarryingRpcController(CellUtil.createCellScanner(cells)); + Pair r = + client.call(pcrc, md, param, md.getOutputType().toProto(), User.getCurrent(), address); + int index = 0; + while (r.getSecond().advance()) { + assertTrue(CELL.equals(r.getSecond().current())); + index++; + } + assertEquals(count, index); + } finally { + client.close(); + rpcServer.stop(); + } + } + + protected abstract AbstractRpcClient createRpcClientRTEDuringConnectionSetup(Configuration conf) + throws IOException; + + @Test + public void testRTEDuringConnectionSetup() throws Exception { + Configuration conf = HBaseConfiguration.create(); + TestRpcServer rpcServer = new TestRpcServer(); + AbstractRpcClient client = createRpcClientRTEDuringConnectionSetup(conf); + try { + rpcServer.start(); + InetSocketAddress address = rpcServer.getListenerAddress(); + MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); + EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build(); + client.call(null, md, param, null, User.getCurrent(), address); + fail("Expected an exception to have been thrown!"); + } catch (Exception e) { + LOG.info("Caught expected exception: " + e.toString()); + assertTrue(StringUtils.stringifyException(e).contains("Injected fault")); + } finally { + client.close(); + rpcServer.stop(); + } + } + + /** Tests that the rpc scheduler is called when requests arrive. */ + @Test + public void testRpcScheduler() throws IOException, InterruptedException { + RpcScheduler scheduler = spy(new FifoRpcScheduler(CONF, 1)); + RpcServer rpcServer = new TestRpcServer(scheduler); + verify(scheduler).init((RpcScheduler.Context) anyObject()); + AbstractRpcClient client = createRpcClient(CONF); + try { + rpcServer.start(); + verify(scheduler).start(); + MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); + EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build(); + for (int i = 0; i < 10; i++) { + client.call( + new PayloadCarryingRpcController( + CellUtil.createCellScanner(ImmutableList. 
of(CELL))), md, param, md + .getOutputType().toProto(), User.getCurrent(), rpcServer.getListenerAddress()); + } + verify(scheduler, times(10)).dispatch((CallRunner) anyObject()); + } finally { + rpcServer.stop(); + verify(scheduler).stop(); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestAsyncIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestAsyncIPC.java new file mode 100644 index 00000000000..69a79ad842d --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestAsyncIPC.java @@ -0,0 +1,298 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.ipc; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOutboundHandlerAdapter; +import io.netty.channel.ChannelPromise; +import io.netty.channel.epoll.EpollEventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CellScannable; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RowMutations; +import org.apache.hadoop.hbase.codec.Codec; +import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoRequestProto; +import org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.testclassification.RPCTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.util.StringUtils; +import org.junit.Test; +import 
org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +import com.google.protobuf.ByteString; +import com.google.protobuf.Descriptors.MethodDescriptor; +import com.google.protobuf.Message; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcChannel; + +@RunWith(Parameterized.class) +@Category({ RPCTests.class, SmallTests.class }) +public class TestAsyncIPC extends AbstractTestIPC { + + private static final Log LOG = LogFactory.getLog(TestAsyncIPC.class); + + private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + @Parameters + public static Collection parameters() { + List paramList = new ArrayList(); + paramList.add(new Object[] { false, false }); + paramList.add(new Object[] { false, true }); + paramList.add(new Object[] { true, false }); + paramList.add(new Object[] { true, true }); + return paramList; + } + + private final boolean useNativeTransport; + + private final boolean useGlobalEventLoopGroup; + + public TestAsyncIPC(boolean useNativeTransport, boolean useGlobalEventLoopGroup) { + this.useNativeTransport = useNativeTransport; + this.useGlobalEventLoopGroup = useGlobalEventLoopGroup; + } + + private void setConf(Configuration conf) { + conf.setBoolean(AsyncRpcClient.USE_NATIVE_TRANSPORT, useNativeTransport); + conf.setBoolean(AsyncRpcClient.USE_NATIVE_TRANSPORT, useGlobalEventLoopGroup); + if (useGlobalEventLoopGroup && AsyncRpcClient.GLOBAL_EVENT_LOOP_GROUP != null) { + if (useNativeTransport + && !(AsyncRpcClient.GLOBAL_EVENT_LOOP_GROUP.getFirst() instanceof EpollEventLoopGroup) + || (!useNativeTransport + && !(AsyncRpcClient.GLOBAL_EVENT_LOOP_GROUP.getFirst() instanceof NioEventLoopGroup))) { + AsyncRpcClient.GLOBAL_EVENT_LOOP_GROUP.getFirst().shutdownGracefully(); + AsyncRpcClient.GLOBAL_EVENT_LOOP_GROUP = null; + } + } + } + + @Override + protected AsyncRpcClient createRpcClientNoCodec(Configuration conf) { + setConf(conf); + return new AsyncRpcClient(conf, HConstants.CLUSTER_ID_DEFAULT, null) { + + @Override + Codec getCodec() { + return null; + } + + }; + } + + @Override + protected AsyncRpcClient createRpcClient(Configuration conf) { + setConf(conf); + return new AsyncRpcClient(conf, HConstants.CLUSTER_ID_DEFAULT, null); + } + + @Override + protected AsyncRpcClient createRpcClientRTEDuringConnectionSetup(Configuration conf) { + setConf(conf); + return new AsyncRpcClient(conf, HConstants.CLUSTER_ID_DEFAULT, null, + new ChannelInitializer() { + + @Override + protected void initChannel(SocketChannel ch) throws Exception { + ch.pipeline().addFirst(new ChannelOutboundHandlerAdapter() { + @Override + public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) + throws Exception { + promise.setFailure(new RuntimeException("Injected fault")); + } + }); + } + }); + } + + @Test + public void testAsyncConnectionSetup() throws Exception { + TestRpcServer rpcServer = new TestRpcServer(); + AsyncRpcClient client = createRpcClient(CONF); + try { + rpcServer.start(); + InetSocketAddress address = rpcServer.getListenerAddress(); + MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); + EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build(); + + RpcChannel channel = + client.createRpcChannel(ServerName.valueOf(address.getHostName(), address.getPort(), + System.currentTimeMillis()), User.getCurrent(), 0); + + final AtomicBoolean done = new 
AtomicBoolean(false); + + channel.callMethod(md, new PayloadCarryingRpcController(), param, md.getOutputType() + .toProto(), new RpcCallback() { + @Override + public void run(Message parameter) { + done.set(true); + } + }); + + TEST_UTIL.waitFor(1000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return done.get(); + } + }); + } finally { + client.close(); + rpcServer.stop(); + } + } + + @Test + public void testRTEDuringAsyncConnectionSetup() throws Exception { + TestRpcServer rpcServer = new TestRpcServer(); + AsyncRpcClient client = createRpcClientRTEDuringConnectionSetup(CONF); + try { + rpcServer.start(); + InetSocketAddress address = rpcServer.getListenerAddress(); + MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); + EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build(); + + RpcChannel channel = + client.createRpcChannel(ServerName.valueOf(address.getHostName(), address.getPort(), + System.currentTimeMillis()), User.getCurrent(), 0); + + final AtomicBoolean done = new AtomicBoolean(false); + + PayloadCarryingRpcController controller = new PayloadCarryingRpcController(); + controller.notifyOnFail(new RpcCallback() { + @Override + public void run(IOException e) { + done.set(true); + LOG.info("Caught expected exception: " + e.toString()); + assertTrue(StringUtils.stringifyException(e).contains("Injected fault")); + } + }); + + channel.callMethod(md, controller, param, md.getOutputType().toProto(), + new RpcCallback() { + @Override + public void run(Message parameter) { + done.set(true); + fail("Expected an exception to have been thrown!"); + } + }); + + TEST_UTIL.waitFor(1000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return done.get(); + } + }); + } finally { + client.close(); + rpcServer.stop(); + } + } + + public static void main(String[] args) throws IOException, SecurityException, + NoSuchMethodException, InterruptedException { + if (args.length != 2) { + System.out.println("Usage: TestAsyncIPC "); + return; + } + // ((Log4JLogger)HBaseServer.LOG).getLogger().setLevel(Level.INFO); + // ((Log4JLogger)HBaseClient.LOG).getLogger().setLevel(Level.INFO); + int cycles = Integer.parseInt(args[0]); + int cellcount = Integer.parseInt(args[1]); + Configuration conf = HBaseConfiguration.create(); + TestRpcServer rpcServer = new TestRpcServer(); + MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); + EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build(); + AsyncRpcClient client = new AsyncRpcClient(conf, HConstants.CLUSTER_ID_DEFAULT, null); + KeyValue kv = BIG_CELL; + Put p = new Put(CellUtil.cloneRow(kv)); + for (int i = 0; i < cellcount; i++) { + p.add(kv); + } + RowMutations rm = new RowMutations(CellUtil.cloneRow(kv)); + rm.add(p); + try { + rpcServer.start(); + InetSocketAddress address = rpcServer.getListenerAddress(); + long startTime = System.currentTimeMillis(); + User user = User.getCurrent(); + for (int i = 0; i < cycles; i++) { + List cells = new ArrayList(); + // Message param = RequestConverter.buildMultiRequest(HConstants.EMPTY_BYTE_ARRAY, rm); + ClientProtos.RegionAction.Builder builder = + RequestConverter.buildNoDataRegionAction(HConstants.EMPTY_BYTE_ARRAY, rm, cells, + RegionAction.newBuilder(), ClientProtos.Action.newBuilder(), + MutationProto.newBuilder()); + builder.setRegion(RegionSpecifier + .newBuilder() + .setType(RegionSpecifierType.REGION_NAME) + .setValue( + 
ByteString.copyFrom(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes()))); + if (i % 100000 == 0) { + LOG.info("" + i); + // Uncomment this for a thread dump every so often. + // ReflectionUtils.printThreadInfo(new PrintWriter(System.out), + // "Thread dump " + Thread.currentThread().getName()); + } + PayloadCarryingRpcController pcrc = + new PayloadCarryingRpcController(CellUtil.createCellScanner(cells)); + // Pair response = + client.call(pcrc, md, builder.build(), param, user, address); + /* + * int count = 0; while (p.getSecond().advance()) { count++; } assertEquals(cells.size(), + * count); + */ + } + LOG.info("Cycled " + cycles + " time(s) with " + cellcount + " cell(s) in " + + (System.currentTimeMillis() - startTime) + "ms"); + } finally { + client.close(); + rpcServer.stop(); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestCallRunner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestCallRunner.java index be16529f276..6080bcaef36 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestCallRunner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestCallRunner.java @@ -35,7 +35,7 @@ public class TestCallRunner { Mockito.when(mockRpcServer.isStarted()).thenReturn(true); RpcServer.Call mockCall = Mockito.mock(RpcServer.Call.class); mockCall.connection = Mockito.mock(RpcServer.Connection.class); - CallRunner cr = new CallRunner(mockRpcServer, mockCall, new UserProvider()); + CallRunner cr = new CallRunner(mockRpcServer, mockCall); cr.run(); } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestGlobalEventLoopGroup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestGlobalEventLoopGroup.java new file mode 100644 index 00000000000..60dbd1b2f0c --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestGlobalEventLoopGroup.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.ipc; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertSame; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.testclassification.RPCTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ RPCTests.class, SmallTests.class }) +public class TestGlobalEventLoopGroup { + + @Test + public void test() { + Configuration conf = HBaseConfiguration.create(); + conf.setBoolean(AsyncRpcClient.USE_GLOBAL_EVENT_LOOP_GROUP, true); + AsyncRpcClient client = new AsyncRpcClient(conf, HConstants.CLUSTER_ID_DEFAULT, null); + assertNotNull(AsyncRpcClient.GLOBAL_EVENT_LOOP_GROUP); + AsyncRpcClient client1 = new AsyncRpcClient(conf, HConstants.CLUSTER_ID_DEFAULT, null); + assertSame(client.bootstrap.group(), client1.bootstrap.group()); + client1.close(); + assertFalse(client.bootstrap.group().isShuttingDown()); + + conf.setBoolean(AsyncRpcClient.USE_GLOBAL_EVENT_LOOP_GROUP, false); + AsyncRpcClient client2 = new AsyncRpcClient(conf, HConstants.CLUSTER_ID_DEFAULT, null); + assertNotSame(client.bootstrap.group(), client2.bootstrap.group()); + client2.close(); + + client.close(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java index 0933f528c7d..67e4e4f802e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestIPC.java @@ -1,76 +1,48 @@ /** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package org.apache.hadoop.hbase.ipc; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; import static org.mockito.Matchers.anyInt; -import static org.mockito.Matchers.anyObject; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.internal.verification.VerificationModeFactory.times; import java.io.IOException; import java.net.InetSocketAddress; import java.net.Socket; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; import javax.net.SocketFactory; -import com.google.protobuf.BlockingRpcChannel; -import com.google.protobuf.RpcCallback; -import com.google.protobuf.RpcChannel; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInitializer; -import io.netty.channel.ChannelOutboundHandlerAdapter; -import io.netty.channel.ChannelPromise; -import io.netty.channel.socket.SocketChannel; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScannable; -import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.Waiter; -import org.apache.hadoop.hbase.testclassification.RPCTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.codec.Codec; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoRequestProto; -import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoResponseProto; -import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EmptyRequestProto; -import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EmptyResponseProto; -import org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos; -import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto; @@ -78,500 +50,55 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.io.compress.GzipCodec; +import org.apache.hadoop.hbase.testclassification.RPCTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.util.StringUtils; -import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import 
com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; -import com.google.protobuf.BlockingService; import com.google.protobuf.ByteString; import com.google.protobuf.Descriptors.MethodDescriptor; -import com.google.protobuf.Message; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; -/** - * Some basic ipc tests. - */ -@Category({RPCTests.class, SmallTests.class}) -public class TestIPC { - private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); +@Category({ RPCTests.class, SmallTests.class }) +public class TestIPC extends AbstractTestIPC { - public static final Log LOG = LogFactory.getLog(TestIPC.class); + private static final Log LOG = LogFactory.getLog(TestIPC.class); - static byte [] CELL_BYTES = Bytes.toBytes("xyz"); - static Cell CELL = new KeyValue(CELL_BYTES, CELL_BYTES, CELL_BYTES, CELL_BYTES); - static byte [] BIG_CELL_BYTES = new byte [10 * 1024]; - static Cell BIG_CELL = new KeyValue(CELL_BYTES, CELL_BYTES, CELL_BYTES, BIG_CELL_BYTES); - private final static Configuration CONF = HBaseConfiguration.create(); - // We are using the test TestRpcServiceProtos generated classes and Service because they are - // available and basic with methods like 'echo', and ping. Below we make a blocking service - // by passing in implementation of blocking interface. We use this service in all tests that - // follow. - private static final BlockingService SERVICE = - TestRpcServiceProtos.TestProtobufRpcProto.newReflectiveBlockingService( - new TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface() { - - @Override - public EmptyResponseProto ping(RpcController controller, - EmptyRequestProto request) throws ServiceException { - // TODO Auto-generated method stub - return null; - } - - @Override - public EmptyResponseProto error(RpcController controller, - EmptyRequestProto request) throws ServiceException { - // TODO Auto-generated method stub - return null; - } - - @Override - public EchoResponseProto echo(RpcController controller, EchoRequestProto request) - throws ServiceException { - if (controller instanceof PayloadCarryingRpcController) { - PayloadCarryingRpcController pcrc = (PayloadCarryingRpcController)controller; - // If cells, scan them to check we are able to iterate what we were given and since this is - // an echo, just put them back on the controller creating a new block. Tests our block - // building. - CellScanner cellScanner = pcrc.cellScanner(); - List list = null; - if (cellScanner != null) { - list = new ArrayList(); - try { - while(cellScanner.advance()) { - list.add(cellScanner.current()); - } - } catch (IOException e) { - throw new ServiceException(e); - } - } - cellScanner = CellUtil.createCellScanner(list); - ((PayloadCarryingRpcController)controller).setCellScanner(cellScanner); - } - return EchoResponseProto.newBuilder().setMessage(request.getMessage()).build(); - } - }); - - /** - * Instance of server. We actually don't do anything speical in here so could just use - * HBaseRpcServer directly. 
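The remainder of this file's diff shows the shape of the refactor: the shared test bodies now live in AbstractTestIPC, and TestIPC and TestAsyncIPC only override the protected createRpcClient* factory methods to supply the blocking or the async client. A minimal sketch of that abstract-test/factory pattern with JUnit 4 follows; Client, AbstractClientTest and BlockingClientTest are placeholder names, not the HBase classes.

// Sketch of the abstract-test/factory pattern; Client and the test classes are placeholders.
import static org.junit.Assert.assertEquals;

import org.junit.Test;

public abstract class AbstractClientTest {

  /** Minimal client abstraction shared by the blocking and async variants. */
  public interface Client {
    String echo(String message);
    void close();
  }

  /** Concrete subclasses supply the client implementation under test. */
  protected abstract Client createClient();

  @Test
  public void testEcho() {
    Client client = createClient();
    try {
      assertEquals("hello", client.echo("hello"));
    } finally {
      client.close();
    }
  }
}

// One concrete subclass per client implementation, mirroring TestIPC and TestAsyncIPC.
class BlockingClientTest extends AbstractClientTest {
  @Override
  protected Client createClient() {
    return new Client() {
      @Override
      public String echo(String message) {
        return message; // a real test would go through an RPC round trip
      }

      @Override
      public void close() {
      }
    };
  }
}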
- */ - private static class TestRpcServer extends RpcServer { - - TestRpcServer() throws IOException { - this(new FifoRpcScheduler(CONF, 1)); - } - - TestRpcServer(RpcScheduler scheduler) throws IOException { - super(null, "testRpcServer", - Lists.newArrayList(new BlockingServiceAndInterface(SERVICE, null)), - new InetSocketAddress("localhost", 0), CONF, scheduler); - } - - @Override - public Pair call(BlockingService service, - MethodDescriptor md, Message param, CellScanner cellScanner, - long receiveTime, MonitoredRPCHandler status) throws IOException { - return super.call(service, md, param, cellScanner, receiveTime, status); - } - } - - /** - * Ensure we do not HAVE TO HAVE a codec. - * @throws InterruptedException - * @throws IOException - */ - @Test - public void testNoCodec() throws InterruptedException, IOException { - Configuration conf = HBaseConfiguration.create(); - RpcClientImpl client = new RpcClientImpl(conf, HConstants.CLUSTER_ID_DEFAULT) { + @Override + protected RpcClientImpl createRpcClientNoCodec(Configuration conf) { + return new RpcClientImpl(conf, HConstants.CLUSTER_ID_DEFAULT) { @Override Codec getCodec() { return null; } }; - TestRpcServer rpcServer = new TestRpcServer(); - try { - rpcServer.start(); - InetSocketAddress address = rpcServer.getListenerAddress(); - MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); - final String message = "hello"; - EchoRequestProto param = EchoRequestProto.newBuilder().setMessage(message).build(); - Pair r = client.call(null, md, param, - md.getOutputType().toProto(), User.getCurrent(), address); - assertTrue(r.getSecond() == null); - // Silly assertion that the message is in the returned pb. - assertTrue(r.getFirst().toString().contains(message)); - } finally { - client.close(); - rpcServer.stop(); - } } - /** - * Ensure we do not HAVE TO HAVE a codec. - * - * @throws InterruptedException - * @throws IOException - */ - @Test public void testNoCodecAsync() throws InterruptedException, IOException, ServiceException { - Configuration conf = HBaseConfiguration.create(); - AsyncRpcClient client = new AsyncRpcClient(conf, HConstants.CLUSTER_ID_DEFAULT, null) { - @Override Codec getCodec() { - return null; - } - }; - TestRpcServer rpcServer = new TestRpcServer(); - try { - rpcServer.start(); - InetSocketAddress address = rpcServer.getListenerAddress(); - MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); - final String message = "hello"; - EchoRequestProto param = EchoRequestProto.newBuilder().setMessage(message).build(); - - BlockingRpcChannel channel = client - .createBlockingRpcChannel(ServerName.valueOf(address.getHostName(), address.getPort(), - System.currentTimeMillis()), User.getCurrent(), 0); - - PayloadCarryingRpcController controller = new PayloadCarryingRpcController(); - Message response = - channel.callBlockingMethod(md, controller, param, md.getOutputType().toProto()); - - assertTrue(controller.cellScanner() == null); - // Silly assertion that the message is in the returned pb. - assertTrue(response.toString().contains(message)); - } finally { - client.close(); - rpcServer.stop(); - } + @Override + protected RpcClientImpl createRpcClient(Configuration conf) { + return new RpcClientImpl(conf, HConstants.CLUSTER_ID_DEFAULT); } - /** - * It is hard to verify the compression is actually happening under the wraps. 
Hope that if - * unsupported, we'll get an exception out of some time (meantime, have to trace it manually - * to confirm that compression is happening down in the client and server). - * @throws IOException - * @throws InterruptedException - * @throws SecurityException - * @throws NoSuchMethodException - */ - @Test - public void testCompressCellBlock() - throws IOException, InterruptedException, SecurityException, NoSuchMethodException, - ServiceException { - Configuration conf = new Configuration(HBaseConfiguration.create()); - conf.set("hbase.client.rpc.compressor", GzipCodec.class.getCanonicalName()); - doSimpleTest(new RpcClientImpl(conf, HConstants.CLUSTER_ID_DEFAULT)); - - // Another test for the async client - doAsyncSimpleTest(new AsyncRpcClient(conf, HConstants.CLUSTER_ID_DEFAULT, null)); - } - - private void doSimpleTest(final RpcClientImpl client) - throws InterruptedException, IOException { - TestRpcServer rpcServer = new TestRpcServer(); - List cells = new ArrayList(); - int count = 3; - for (int i = 0; i < count; i++) cells.add(CELL); - try { - rpcServer.start(); - InetSocketAddress address = rpcServer.getListenerAddress(); - MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); - EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build(); - - PayloadCarryingRpcController pcrc = - new PayloadCarryingRpcController(CellUtil.createCellScanner(cells)); - Pair r = client - .call(pcrc, md, param, md.getOutputType().toProto(), User.getCurrent(), address); - int index = 0; - while (r.getSecond().advance()) { - assertTrue(CELL.equals(r.getSecond().current())); - index++; - } - assertEquals(count, index); - } finally { - client.close(); - rpcServer.stop(); - } - } - - private void doAsyncSimpleTest(final AsyncRpcClient client) - throws InterruptedException, IOException, ServiceException { - TestRpcServer rpcServer = new TestRpcServer(); - List cells = new ArrayList(); - int count = 3; - for (int i = 0; i < count; i++) - cells.add(CELL); - try { - rpcServer.start(); - InetSocketAddress address = rpcServer.getListenerAddress(); - MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); - EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build(); - - PayloadCarryingRpcController pcrc = - new PayloadCarryingRpcController(CellUtil.createCellScanner(cells)); - - BlockingRpcChannel channel = client.createBlockingRpcChannel( - ServerName.valueOf(address.getHostName(), address.getPort(), System.currentTimeMillis()), - User.getCurrent(), 0); - - channel.callBlockingMethod(md, pcrc, param, md.getOutputType().toProto()); - - CellScanner cellScanner = pcrc.cellScanner(); - - int index = 0; - while (cellScanner.advance()) { - assertTrue(CELL.equals(cellScanner.current())); - index++; - } - assertEquals(count, index); - } finally { - client.close(); - rpcServer.stop(); - } - } - - @Test - public void testRTEDuringConnectionSetup() throws Exception { - Configuration conf = HBaseConfiguration.create(); + @Override + protected RpcClientImpl createRpcClientRTEDuringConnectionSetup(Configuration conf) + throws IOException { SocketFactory spyFactory = spy(NetUtils.getDefaultSocketFactory(conf)); Mockito.doAnswer(new Answer() { @Override public Socket answer(InvocationOnMock invocation) throws Throwable { - Socket s = spy((Socket)invocation.callRealMethod()); + Socket s = spy((Socket) invocation.callRealMethod()); doThrow(new RuntimeException("Injected fault")).when(s).setSoTimeout(anyInt()); return s; } 
}).when(spyFactory).createSocket(); - TestRpcServer rpcServer = new TestRpcServer(); - RpcClientImpl client = new RpcClientImpl(conf, HConstants.CLUSTER_ID_DEFAULT, spyFactory); - try { - rpcServer.start(); - InetSocketAddress address = rpcServer.getListenerAddress(); - MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); - EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build(); - client.call(null, md, param, null, User.getCurrent(), address); - fail("Expected an exception to have been thrown!"); - } catch (Exception e) { - LOG.info("Caught expected exception: " + e.toString()); - assertTrue(StringUtils.stringifyException(e).contains("Injected fault")); - } finally { - client.close(); - rpcServer.stop(); - } + return new RpcClientImpl(conf, HConstants.CLUSTER_ID_DEFAULT, spyFactory); } - @Test - public void testRTEDuringAsyncBlockingConnectionSetup() throws Exception { - Configuration conf = HBaseConfiguration.create(); - - TestRpcServer rpcServer = new TestRpcServer(); - AsyncRpcClient client = new AsyncRpcClient(conf, HConstants.CLUSTER_ID_DEFAULT, null, - new ChannelInitializer() { - - @Override protected void initChannel(SocketChannel ch) throws Exception { - ch.pipeline().addFirst(new ChannelOutboundHandlerAdapter() { - @Override - public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) - throws Exception { - promise.setFailure(new RuntimeException("Injected fault")); - } - }); - } - }); - try { - rpcServer.start(); - InetSocketAddress address = rpcServer.getListenerAddress(); - MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); - EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build(); - - BlockingRpcChannel channel = client.createBlockingRpcChannel( - ServerName.valueOf(address.getHostName(), address.getPort(), System.currentTimeMillis()), - User.getCurrent(), 0); - - channel.callBlockingMethod(md, new PayloadCarryingRpcController(), param, - md.getOutputType().toProto()); - - fail("Expected an exception to have been thrown!"); - } catch (Exception e) { - LOG.info("Caught expected exception: " + e.toString()); - assertTrue(StringUtils.stringifyException(e).contains("Injected fault")); - } finally { - client.close(); - rpcServer.stop(); - } - } - - - @Test - public void testRTEDuringAsyncConnectionSetup() throws Exception { - Configuration conf = HBaseConfiguration.create(); - - TestRpcServer rpcServer = new TestRpcServer(); - AsyncRpcClient client = new AsyncRpcClient(conf, HConstants.CLUSTER_ID_DEFAULT, null, - new ChannelInitializer() { - - @Override protected void initChannel(SocketChannel ch) throws Exception { - ch.pipeline().addFirst(new ChannelOutboundHandlerAdapter() { - @Override - public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) - throws Exception { - promise.setFailure(new RuntimeException("Injected fault")); - } - }); - } - }); - try { - rpcServer.start(); - InetSocketAddress address = rpcServer.getListenerAddress(); - MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); - EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build(); - - RpcChannel channel = client.createRpcChannel( - ServerName.valueOf(address.getHostName(), address.getPort(), System.currentTimeMillis()), - User.getCurrent(), 0); - - final AtomicBoolean done = new AtomicBoolean(false); - - PayloadCarryingRpcController controller = new PayloadCarryingRpcController(); - 
controller.notifyOnFail(new RpcCallback() { - @Override - public void run(IOException e) { - done.set(true); - LOG.info("Caught expected exception: " + e.toString()); - assertTrue(StringUtils.stringifyException(e).contains("Injected fault")); - } - }); - - channel.callMethod(md, controller, param, - md.getOutputType().toProto(), new RpcCallback() { - @Override - public void run(Message parameter) { - done.set(true); - fail("Expected an exception to have been thrown!"); - } - }); - - TEST_UTIL.waitFor(1000, new Waiter.Predicate() { - @Override - public boolean evaluate() throws Exception { - return done.get(); - } - }); - } finally { - client.close(); - rpcServer.stop(); - } - } - - @Test - public void testAsyncConnectionSetup() throws Exception { - Configuration conf = HBaseConfiguration.create(); - - TestRpcServer rpcServer = new TestRpcServer(); - AsyncRpcClient client = new AsyncRpcClient(conf, HConstants.CLUSTER_ID_DEFAULT, null); - try { - rpcServer.start(); - InetSocketAddress address = rpcServer.getListenerAddress(); - MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); - EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build(); - - RpcChannel channel = client.createRpcChannel( - ServerName.valueOf(address.getHostName(), address.getPort(), System.currentTimeMillis()), - User.getCurrent(), 0); - - final AtomicBoolean done = new AtomicBoolean(false); - - channel.callMethod(md, new PayloadCarryingRpcController(), param, - md.getOutputType().toProto(), new RpcCallback() { - @Override - public void run(Message parameter) { - done.set(true); - } - }); - - TEST_UTIL.waitFor(1000, new Waiter.Predicate() { - @Override - public boolean evaluate() throws Exception { - return done.get(); - } - }); - } finally { - client.close(); - rpcServer.stop(); - } - } - - /** Tests that the rpc scheduler is called when requests arrive. */ - @Test - public void testRpcScheduler() throws IOException, InterruptedException { - RpcScheduler scheduler = spy(new FifoRpcScheduler(CONF, 1)); - RpcServer rpcServer = new TestRpcServer(scheduler); - verify(scheduler).init((RpcScheduler.Context) anyObject()); - RpcClientImpl client = new RpcClientImpl(CONF, HConstants.CLUSTER_ID_DEFAULT); - try { - rpcServer.start(); - verify(scheduler).start(); - MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); - EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build(); - for (int i = 0; i < 10; i++) { - client.call( - new PayloadCarryingRpcController(CellUtil.createCellScanner(ImmutableList.of(CELL))), - md, param, md.getOutputType().toProto(), User.getCurrent(), - rpcServer.getListenerAddress()); - } - verify(scheduler, times(10)).dispatch((CallRunner) anyObject()); - } finally { - rpcServer.stop(); - verify(scheduler).stop(); - } - } - - /** - * Tests that the rpc scheduler is called when requests arrive. 
- */ - @Test - public void testRpcSchedulerAsync() - throws IOException, InterruptedException, ServiceException { - RpcScheduler scheduler = spy(new FifoRpcScheduler(CONF, 1)); - RpcServer rpcServer = new TestRpcServer(scheduler); - verify(scheduler).init((RpcScheduler.Context) anyObject()); - AbstractRpcClient client = new AsyncRpcClient(CONF, HConstants.CLUSTER_ID_DEFAULT, null); - try { - rpcServer.start(); - verify(scheduler).start(); - MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); - EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build(); - ServerName serverName = ServerName.valueOf(rpcServer.getListenerAddress().getHostName(), - rpcServer.getListenerAddress().getPort(), System.currentTimeMillis()); - - for (int i = 0; i < 10; i++) { - BlockingRpcChannel channel = client.createBlockingRpcChannel( - serverName, User.getCurrent(), 0); - - channel.callBlockingMethod(md, - new PayloadCarryingRpcController(CellUtil.createCellScanner(ImmutableList.of(CELL))), - param, md.getOutputType().toProto()); - } - verify(scheduler, times(10)).dispatch((CallRunner) anyObject()); - } finally { - rpcServer.stop(); - verify(scheduler).stop(); - } - } - - public static void main(String[] args) - throws IOException, SecurityException, NoSuchMethodException, InterruptedException { + public static void main(String[] args) throws IOException, SecurityException, + NoSuchMethodException, InterruptedException { if (args.length != 2) { System.out.println("Usage: TestIPC "); return; @@ -585,12 +112,12 @@ public class TestIPC { MethodDescriptor md = SERVICE.getDescriptorForType().findMethodByName("echo"); EchoRequestProto param = EchoRequestProto.newBuilder().setMessage("hello").build(); RpcClientImpl client = new RpcClientImpl(conf, HConstants.CLUSTER_ID_DEFAULT); - KeyValue kv = KeyValueUtil.ensureKeyValue(BIG_CELL); - Put p = new Put(kv.getRow()); + KeyValue kv = BIG_CELL; + Put p = new Put(CellUtil.cloneRow(kv)); for (int i = 0; i < cellcount; i++) { p.add(kv); } - RowMutations rm = new RowMutations(kv.getRow()); + RowMutations rm = new RowMutations(CellUtil.cloneRow(kv)); rm.add(p); try { rpcServer.start(); @@ -600,35 +127,36 @@ public class TestIPC { for (int i = 0; i < cycles; i++) { List cells = new ArrayList(); // Message param = RequestConverter.buildMultiRequest(HConstants.EMPTY_BYTE_ARRAY, rm); - ClientProtos.RegionAction.Builder builder = RequestConverter.buildNoDataRegionAction( - HConstants.EMPTY_BYTE_ARRAY, rm, cells, - RegionAction.newBuilder(), - ClientProtos.Action.newBuilder(), - MutationProto.newBuilder()); - builder.setRegion(RegionSpecifier.newBuilder().setType(RegionSpecifierType.REGION_NAME). - setValue(ByteString.copyFrom(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes()))); + ClientProtos.RegionAction.Builder builder = + RequestConverter.buildNoDataRegionAction(HConstants.EMPTY_BYTE_ARRAY, rm, cells, + RegionAction.newBuilder(), ClientProtos.Action.newBuilder(), + MutationProto.newBuilder()); + builder.setRegion(RegionSpecifier + .newBuilder() + .setType(RegionSpecifierType.REGION_NAME) + .setValue( + ByteString.copyFrom(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes()))); if (i % 100000 == 0) { LOG.info("" + i); // Uncomment this for a thread dump every so often. 
// ReflectionUtils.printThreadInfo(new PrintWriter(System.out), - // "Thread dump " + Thread.currentThread().getName()); + // "Thread dump " + Thread.currentThread().getName()); } PayloadCarryingRpcController pcrc = new PayloadCarryingRpcController(CellUtil.createCellScanner(cells)); - Pair response = - client.call(pcrc, md, builder.build(), param, user, address); + // Pair response = + client.call(pcrc, md, builder.build(), param, user, address); /* - int count = 0; - while (p.getSecond().advance()) { - count++; - } - assertEquals(cells.size(), count);*/ + * int count = 0; while (p.getSecond().advance()) { count++; } assertEquals(cells.size(), + * count); + */ } - LOG.info("Cycled " + cycles + " time(s) with " + cellcount + " cell(s) in " + - (System.currentTimeMillis() - startTime) + "ms"); + LOG.info("Cycled " + cycles + " time(s) with " + cellcount + " cell(s) in " + + (System.currentTimeMillis() - startTime) + "ms"); } finally { client.close(); rpcServer.stop(); } } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcMetrics.java index 443ec781ba3..c2b0344662f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcMetrics.java @@ -99,8 +99,10 @@ public class TestRpcMetrics { mrpc.dequeuedCall(100); mrpc.processedCall(101); + mrpc.totalCall(102); HELPER.assertCounter("queueCallTime_NumOps", 1, serverSource); HELPER.assertCounter("processCallTime_NumOps", 1, serverSource); + HELPER.assertCounter("totalCallTime_NumOps", 1, serverSource); mrpc.sentBytes(103); mrpc.sentBytes(103); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java index 9fb4eb8dd26..3497cdf52b3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java @@ -35,15 +35,34 @@ import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.RegexStringComparator; +import org.apache.hadoop.hbase.filter.RowFilter; +import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.mapred.InputFormat; +import org.apache.hadoop.mapred.JobClient; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.JobConfigurable; +import org.apache.hadoop.mapred.MiniMRCluster; +import org.apache.hadoop.mapred.OutputCollector; +import 
org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.RunningJob; +import org.apache.hadoop.mapred.lib.NullOutputFormat; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -62,6 +81,7 @@ public class TestTableInputFormat { private static final Log LOG = LogFactory.getLog(TestTableInputFormat.class); private final static HBaseTestingUtility UTIL = new HBaseTestingUtility(); + private static MiniMRCluster mrCluster; static final byte[] FAMILY = Bytes.toBytes("family"); private static final byte[][] columns = new byte[][] { FAMILY }; @@ -69,10 +89,12 @@ public class TestTableInputFormat { @BeforeClass public static void beforeClass() throws Exception { UTIL.startMiniCluster(); + mrCluster = UTIL.startMiniMapReduceCluster(); } @AfterClass public static void afterClass() throws Exception { + UTIL.shutdownMiniMapReduceCluster(); UTIL.shutdownMiniCluster(); } @@ -91,12 +113,27 @@ public class TestTableInputFormat { * @throws IOException */ public static Table createTable(byte[] tableName) throws IOException { - Table table = UTIL.createTable(TableName.valueOf(tableName), new byte[][]{FAMILY}); + return createTable(tableName, new byte[][] { FAMILY }); + } + + /** + * Setup a table with two rows and values per column family. + * + * @param tableName + * @return + * @throws IOException + */ + public static Table createTable(byte[] tableName, byte[][] families) throws IOException { + Table table = UTIL.createTable(TableName.valueOf(tableName), families); Put p = new Put("aaa".getBytes()); - p.add(FAMILY, null, "value aaa".getBytes()); + for (byte[] family : families) { + p.add(family, null, "value aaa".getBytes()); + } table.put(p); p = new Put("bbb".getBytes()); - p.add(FAMILY, null, "value bbb".getBytes()); + for (byte[] family : families) { + p.add(family, null, "value bbb".getBytes()); + } table.put(p); return table; } @@ -151,46 +188,6 @@ public class TestTableInputFormat { assertFalse(more); } - /** - * Create table data and run tests on specified htable using the - * o.a.h.hbase.mapreduce API. 
- * - * @param table - * @throws IOException - * @throws InterruptedException - */ - static void runTestMapreduce(Table table) throws IOException, - InterruptedException { - org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl trr = - new org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl(); - Scan s = new Scan(); - s.setStartRow("aaa".getBytes()); - s.setStopRow("zzz".getBytes()); - s.addFamily(FAMILY); - trr.setScan(s); - trr.setHTable(table); - - trr.initialize(null, null); - Result r = new Result(); - ImmutableBytesWritable key = new ImmutableBytesWritable(); - - boolean more = trr.nextKeyValue(); - assertTrue(more); - key = trr.getCurrentKey(); - r = trr.getCurrentValue(); - checkResult(r, key, "aaa".getBytes(), "value aaa".getBytes()); - - more = trr.nextKeyValue(); - assertTrue(more); - key = trr.getCurrentKey(); - r = trr.getCurrentValue(); - checkResult(r, key, "bbb".getBytes(), "value bbb".getBytes()); - - // no more data - more = trr.nextKeyValue(); - assertFalse(more); - } - - /** * Create a table that IOE's on first scanner next call * @@ -321,70 +318,148 @@ public class TestTableInputFormat { } /** - * Run test assuming no errors using newer mapreduce api - * - * @throws IOException - * @throws InterruptedException + * Verify the example we present in javadocs on TableInputFormatBase */ @Test - public void testTableRecordReaderMapreduce() throws IOException, - InterruptedException { - Table table = createTable("table1-mr".getBytes()); - runTestMapreduce(table); + public void testExtensionOfTableInputFormatBase() throws IOException { + LOG.info("testing use of an InputFormat that extends InputFormatBase"); + final Table table = createTable(Bytes.toBytes("exampleTable"), + new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); + testInputFormat(ExampleTIF.class); } - /** - * Run test assuming Scanner IOException failure using newer mapreduce api - * - * @throws IOException - * @throws InterruptedException - */ @Test - public void testTableRecordReaderScannerFailMapreduce() throws IOException, - InterruptedException { - Table htable = createIOEScannerTable("table2-mr".getBytes(), 1); - runTestMapreduce(htable); + public void testDeprecatedExtensionOfTableInputFormatBase() throws IOException { + LOG.info("testing use of an InputFormat that extends InputFormatBase, " + + "as it was given in 0.98."); + final Table table = createTable(Bytes.toBytes("exampleDeprecatedTable"), + new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); + testInputFormat(ExampleDeprecatedTIF.class); } - /** - * Run test assuming Scanner IOException failure using newer mapreduce api - * - * @throws IOException - * @throws InterruptedException - */ - @Test(expected = IOException.class) - public void testTableRecordReaderScannerFailMapreduceTwice() throws IOException, - InterruptedException { - Table htable = createIOEScannerTable("table3-mr".getBytes(), 2); - runTestMapreduce(htable); - } - - /** - * Run test assuming UnknownScannerException (which is a type of - * DoNotRetryIOException) using newer mapreduce api - * - * @throws InterruptedException - * @throws org.apache.hadoop.hbase.DoNotRetryIOException - */ @Test - public void testTableRecordReaderScannerTimeoutMapreduce() - throws IOException, InterruptedException { - Table htable = createDNRIOEScannerTable("table4-mr".getBytes(), 1); - runTestMapreduce(htable); + public void testJobConfigurableExtensionOfTableInputFormatBase() throws IOException { + LOG.info("testing use of an InputFormat that extends 
InputFormatBase, " + + "using JobConfigurable."); + final Table table = createTable(Bytes.toBytes("exampleJobConfigurableTable"), + new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); + testInputFormat(ExampleJobConfigurableTIF.class); } - /** - * Run test assuming UnknownScannerException (which is a type of - * DoNotRetryIOException) using newer mapreduce api - * - * @throws InterruptedException - * @throws org.apache.hadoop.hbase.DoNotRetryIOException - */ - @Test(expected = org.apache.hadoop.hbase.DoNotRetryIOException.class) - public void testTableRecordReaderScannerTimeoutMapreduceTwice() - throws IOException, InterruptedException { - Table htable = createDNRIOEScannerTable("table5-mr".getBytes(), 2); - runTestMapreduce(htable); + void testInputFormat(Class clazz) throws IOException { + final JobConf job = MapreduceTestingShim.getJobConf(mrCluster); + job.setInputFormat(clazz); + job.setOutputFormat(NullOutputFormat.class); + job.setMapperClass(ExampleVerifier.class); + job.setNumReduceTasks(0); + LOG.debug("submitting job."); + final RunningJob run = JobClient.runJob(job); + assertTrue("job failed!", run.isSuccessful()); + assertEquals("Saw the wrong number of instances of the filtered-for row.", 2, run.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getCounter()); + assertEquals("Saw any instances of the filtered out row.", 0, run.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getCounter()); + assertEquals("Saw the wrong number of instances of columnA.", 1, run.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getCounter()); + assertEquals("Saw the wrong number of instances of columnB.", 1, run.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getCounter()); + assertEquals("Saw the wrong count of values for the filtered-for row.", 2, run.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getCounter()); + assertEquals("Saw the wrong count of values for the filtered-out row.", 0, run.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getCounter()); + } + + public static class ExampleVerifier implements TableMap { + + @Override + public void configure(JobConf conf) { + } + + @Override + public void map(ImmutableBytesWritable key, Result value, + OutputCollector output, + Reporter reporter) throws IOException { + for (Cell cell : value.listCells()) { + reporter.getCounter(TestTableInputFormat.class.getName() + ":row", + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) + .increment(1l); + reporter.getCounter(TestTableInputFormat.class.getName() + ":family", + Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())) + .increment(1l); + reporter.getCounter(TestTableInputFormat.class.getName() + ":value", + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())) + .increment(1l); + } + } + + @Override + public void close() { + } + + } + + public static class ExampleDeprecatedTIF extends TableInputFormatBase implements JobConfigurable { + + @Override + public void configure(JobConf job) { + try { + Connection connection = ConnectionFactory.createConnection(job); + Table exampleTable = connection.getTable(TableName.valueOf("exampleDeprecatedTable")); + // mandatory + initializeTable(connection, exampleTable.getName()); + byte[][] inputColumns = new byte [][] { 
Bytes.toBytes("columnA"), + Bytes.toBytes("columnB") }; + // mandatory + setInputColumns(inputColumns); + Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*")); + // optional + setRowFilter(exampleFilter); + } catch (IOException exception) { + throw new RuntimeException("Failed to configure for job.", exception); + } + } + + } + + public static class ExampleJobConfigurableTIF extends ExampleTIF implements JobConfigurable { + + @Override + public void configure(JobConf job) { + try { + initialize(job); + } catch (IOException exception) { + throw new RuntimeException("Failed to initialize.", exception); + } + } + + @Override + protected void initialize(JobConf job) throws IOException { + initialize(job, "exampleJobConfigurableTable"); + } + } + + + public static class ExampleTIF extends TableInputFormatBase { + + @Override + protected void initialize(JobConf job) throws IOException { + initialize(job, "exampleTable"); + } + + protected void initialize(JobConf job, String table) throws IOException { + Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job)); + TableName tableName = TableName.valueOf(table); + // mandatory + initializeTable(connection, tableName); + byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), + Bytes.toBytes("columnB") }; + // mandatory + setInputColumns(inputColumns); + Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*")); + // optional + setRowFilter(exampleFilter); + } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MapreduceTestingShim.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MapreduceTestingShim.java index dee42771ade..b080d7f4264 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MapreduceTestingShim.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/MapreduceTestingShim.java @@ -52,6 +52,8 @@ abstract public class MapreduceTestingShim { abstract public JobContext newJobContext(Configuration jobConf) throws IOException; + + abstract public Job newJob(Configuration conf) throws IOException; abstract public JobConf obtainJobConf(MiniMRCluster cluster); @@ -66,6 +68,10 @@ abstract public class MapreduceTestingShim { return instance.obtainJobConf(cluster); } + public static Job createJob(Configuration conf) throws IOException { + return instance.newJob(conf); + } + public static String getMROutputDirProp() { return instance.obtainMROutputDirProp(); } @@ -84,6 +90,20 @@ abstract public class MapreduceTestingShim { "Failed to instantiate new JobContext(jobConf, new JobID())", e); } } + + @Override + public Job newJob(Configuration conf) throws IOException { + // Implementing: + // return new Job(conf); + Constructor c; + try { + c = Job.class.getConstructor(Configuration.class); + return c.newInstance(conf); + } catch (Exception e) { + throw new IllegalStateException( + "Failed to instantiate new Job(conf)", e); + } + } public JobConf obtainJobConf(MiniMRCluster cluster) { if (cluster == null) return null; @@ -110,11 +130,16 @@ abstract public class MapreduceTestingShim { private static class MapreduceV2Shim extends MapreduceTestingShim { public JobContext newJobContext(Configuration jobConf) { + return newJob(jobConf); + } + + @Override + public Job newJob(Configuration jobConf) { // Implementing: // return Job.getInstance(jobConf); try { Method m = Job.class.getMethod("getInstance", Configuration.class); - return (JobContext) m.invoke(null, 
jobConf); // static method, then arg + return (Job) m.invoke(null, jobConf); // static method, then arg } catch (Exception e) { e.printStackTrace(); throw new IllegalStateException( diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java index ecea98e4bb4..438266e2261 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java @@ -125,6 +125,7 @@ public class TestHFileOutputFormat { private int valLength; private static final int VALLEN_DEFAULT=10; private static final String VALLEN_CONF="randomkv.val.length"; + private static final byte [] QUALIFIER = Bytes.toBytes("data"); @Override protected void setup(Context context) throws IOException, @@ -159,8 +160,7 @@ public class TestHFileOutputFormat { ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes); for (byte[] family : TestHFileOutputFormat.FAMILIES) { - KeyValue kv = new KeyValue(keyBytes, family, - PerformanceEvaluation.QUALIFIER_NAME, valBytes); + KeyValue kv = new KeyValue(keyBytes, family, QUALIFIER, valBytes); context.write(key, kv); } } @@ -878,7 +878,7 @@ public class TestHFileOutputFormat { int taskId = context.getTaskAttemptID().getTaskID().getId(); assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!"; - + final byte [] qualifier = Bytes.toBytes("data"); Random random = new Random(); for (int i = 0; i < numRows; i++) { @@ -887,8 +887,7 @@ public class TestHFileOutputFormat { ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes); for (byte[] family : families) { - KeyValue kv = new KeyValue(keyBytes, family, - PerformanceEvaluation.QUALIFIER_NAME, valBytes); + KeyValue kv = new KeyValue(keyBytes, family, qualifier, valBytes); writer.write(key, kv); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index 0f60f3b61d2..67a6c0a6b85 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -124,6 +124,7 @@ public class TestHFileOutputFormat2 { private int valLength; private static final int VALLEN_DEFAULT=10; private static final String VALLEN_CONF="randomkv.val.length"; + private static final byte [] QUALIFIER = Bytes.toBytes("data"); @Override protected void setup(Context context) throws IOException, @@ -159,8 +160,7 @@ public class TestHFileOutputFormat2 { ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes); for (byte[] family : TestHFileOutputFormat2.FAMILIES) { - Cell kv = new KeyValue(keyBytes, family, - PerformanceEvaluation.QUALIFIER_NAME, valBytes); + Cell kv = new KeyValue(keyBytes, family, QUALIFIER, valBytes); context.write(key, kv); } } @@ -879,7 +879,7 @@ public class TestHFileOutputFormat2 { int taskId = context.getTaskAttemptID().getTaskID().getId(); assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!"; - + final byte [] qualifier = Bytes.toBytes("data"); Random random = new Random(); for (int i = 0; i < numRows; i++) { @@ -888,8 +888,7 @@ public class TestHFileOutputFormat2 { ImmutableBytesWritable key = new ImmutableBytesWritable(keyBytes); for (byte[] family : families) { - Cell kv = new 
KeyValue(keyBytes, family, - PerformanceEvaluation.QUALIFIER_NAME, valBytes); + Cell kv = new KeyValue(keyBytes, family, qualifier, valBytes); writer.write(key, kv); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 935d462d05d..4e3baad0fa7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -41,14 +41,12 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java index 8bd67718ade..a64b7e05df1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java @@ -38,14 +38,11 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.client.Durability; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -54,7 +51,7 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.Tool; @@ -249,11 +246,11 @@ public class TestImportTSVWithOperationAttributes implements Configurable { @Override public void prePut(ObserverContext e, Put put, WALEdit edit, Durability durability) throws IOException { - HRegion region = e.getEnvironment().getRegion(); + Region region = e.getEnvironment().getRegion(); if (!region.getRegionInfo().isMetaTable() && !region.getRegionInfo().getTable().isSystemTable()) { if (put.getAttribute(TEST_ATR_KEY) != null) { - LOG.debug("allow any put to happen " + region.getRegionNameAsString()); + 
LOG.debug("allow any put to happen " + region.getRegionInfo().getRegionNameAsString()); } else { e.bypass(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java index a5cceb03f20..53bdf709daf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java @@ -41,12 +41,11 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; - import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -159,7 +158,7 @@ public class TestImportTSVWithTTLs implements Configurable { @Override public void prePut(ObserverContext e, Put put, WALEdit edit, Durability durability) throws IOException { - HRegion region = e.getEnvironment().getRegion(); + Region region = e.getEnvironment().getRegion(); if (!region.getRegionInfo().isMetaTable() && !region.getRegionInfo().getTable().isSystemTable()) { // The put carries the TTL attribute diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java index 813f374d51d..570f812a00d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -130,7 +131,7 @@ public class TestLoadIncrementalHFiles { new byte[][]{ Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, }); } - + /** * Test loading into a column family that has a ROWCOL bloom filter. */ @@ -270,7 +271,7 @@ public class TestLoadIncrementalHFiles { file.getPath().getName() != "DONOTERASE"); } } - + util.deleteTable(tableName); } @@ -306,6 +307,76 @@ public class TestLoadIncrementalHFiles { } } + @Test(timeout = 60000) + public void testNonHfileFolderWithUnmatchedFamilyName() throws Exception { + testNonHfileFolder("testNonHfileFolderWithUnmatchedFamilyName", true); + } + + @Test(timeout = 60000) + public void testNonHfileFolder() throws Exception { + testNonHfileFolder("testNonHfileFolder", false); + } + + /** + * Write a random data file and a non-file in a dir with a valid family name + * but not part of the table families. we should we able to bulkload without + * getting the unmatched family exception. 
HBASE-13037/HBASE-13227 + */ + private void testNonHfileFolder(String tableName, boolean preCreateTable) throws Exception { + Path dir = util.getDataTestDirOnTestFS(tableName); + FileSystem fs = util.getTestFileSystem(); + dir = dir.makeQualified(fs); + + Path familyDir = new Path(dir, Bytes.toString(FAMILY)); + HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_0"), + FAMILY, QUALIFIER, Bytes.toBytes("begin"), Bytes.toBytes("end"), 500); + createRandomDataFile(fs, new Path(familyDir, "012356789"), 16 * 1024); + + final String NON_FAMILY_FOLDER = "_logs"; + Path nonFamilyDir = new Path(dir, NON_FAMILY_FOLDER); + fs.mkdirs(nonFamilyDir); + fs.mkdirs(new Path(nonFamilyDir, "non-file")); + createRandomDataFile(fs, new Path(nonFamilyDir, "012356789"), 16 * 1024); + + Table table = null; + try { + if (preCreateTable) { + table = util.createTable(TableName.valueOf(tableName), FAMILY); + } else { + table = util.getConnection().getTable(TableName.valueOf(tableName)); + } + + final String[] args = {dir.toString(), tableName}; + new LoadIncrementalHFiles(util.getConfiguration()).run(args); + assertEquals(500, util.countRows(table)); + } finally { + if (table != null) { + table.close(); + } + fs.delete(dir, true); + } + } + + private static void createRandomDataFile(FileSystem fs, Path path, int size) + throws IOException { + FSDataOutputStream stream = fs.create(path); + try { + byte[] data = new byte[1024]; + for (int i = 0; i < data.length; ++i) { + data[i] = (byte)(i & 0xff); + } + while (size >= data.length) { + stream.write(data, 0, data.length); + size -= data.length; + } + if (size > 0) { + stream.write(data, 0, size); + } + } finally { + stream.close(); + } + } + @Test(timeout = 60000) public void testSplitStoreFile() throws IOException { Path dir = util.getDataTestDirOnTestFS("testSplitHFile"); @@ -363,8 +434,8 @@ public class TestLoadIncrementalHFiles { * * Should be inferred as: * a-----------------k m-------------q r--------------t u---------x - * - * The output should be (m,r,u) + * + * The output should be (m,r,u) */ String first; @@ -372,7 +443,7 @@ public class TestLoadIncrementalHFiles { first = "a"; last = "e"; addStartEndKeysForTest(map, first.getBytes(), last.getBytes()); - + first = "r"; last = "s"; addStartEndKeysForTest(map, first.getBytes(), last.getBytes()); @@ -393,14 +464,14 @@ public class TestLoadIncrementalHFiles { first = "s"; last = "t"; addStartEndKeysForTest(map, first.getBytes(), last.getBytes()); - + first = "u"; last = "w"; addStartEndKeysForTest(map, first.getBytes(), last.getBytes()); byte[][] keysArray = LoadIncrementalHFiles.inferBoundaries(map); byte[][] compare = new byte[3][]; compare[0] = "m".getBytes(); - compare[1] = "r".getBytes(); + compare[1] = "r".getBytes(); compare[2] = "u".getBytes(); assertEquals(keysArray.length, 3); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java new file mode 100644 index 00000000000..bc2d08f839d --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java @@ -0,0 +1,487 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.mapreduce; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.anyObject; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.RegexStringComparator; +import org.apache.hadoop.hbase.filter.RowFilter; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.JobConfigurable; +import org.apache.hadoop.mapred.MiniMRCluster; +import org.apache.hadoop.mapreduce.InputFormat; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.JobContext; +import org.apache.hadoop.mapreduce.Mapper.Context; +import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +/** + * This tests the TableInputFormat and its recovery semantics + * + */ +@Category(LargeTests.class) +public class TestTableInputFormat { + + private static final Log LOG = LogFactory.getLog(TestTableInputFormat.class); + + private final static HBaseTestingUtility UTIL = new HBaseTestingUtility(); + private static MiniMRCluster mrCluster; + static final byte[] FAMILY = Bytes.toBytes("family"); + + private static final byte[][] columns = new byte[][] { FAMILY }; + + @BeforeClass + public static void beforeClass() throws Exception { + UTIL.startMiniCluster(); + mrCluster = UTIL.startMiniMapReduceCluster(); + } + + @AfterClass + public static void afterClass() throws Exception { + UTIL.shutdownMiniMapReduceCluster(); + UTIL.shutdownMiniCluster(); + } + + @Before + public void before() throws IOException { + LOG.info("before"); + 
UTIL.ensureSomeRegionServersAvailable(1); + LOG.info("before done"); + } + + /** + * Setup a table with two rows and values. + * + * @param tableName + * @return + * @throws IOException + */ + public static Table createTable(byte[] tableName) throws IOException { + return createTable(tableName, new byte[][] { FAMILY }); + } + + /** + * Setup a table with two rows and values per column family. + * + * @param tableName + * @return + * @throws IOException + */ + public static Table createTable(byte[] tableName, byte[][] families) throws IOException { + Table table = UTIL.createTable(TableName.valueOf(tableName), families); + Put p = new Put("aaa".getBytes()); + for (byte[] family : families) { + p.add(family, null, "value aaa".getBytes()); + } + table.put(p); + p = new Put("bbb".getBytes()); + for (byte[] family : families) { + p.add(family, null, "value bbb".getBytes()); + } + table.put(p); + return table; + } + + /** + * Verify that the result and key have expected values. + * + * @param r + * @param key + * @param expectedKey + * @param expectedValue + * @return + */ + static boolean checkResult(Result r, ImmutableBytesWritable key, + byte[] expectedKey, byte[] expectedValue) { + assertEquals(0, key.compareTo(expectedKey)); + Map vals = r.getFamilyMap(FAMILY); + byte[] value = vals.values().iterator().next(); + assertTrue(Arrays.equals(value, expectedValue)); + return true; // if succeed + } + + /** + * Create table data and run tests on specified htable using the + * o.a.h.hbase.mapreduce API. + * + * @param table + * @throws IOException + * @throws InterruptedException + */ + static void runTestMapreduce(Table table) throws IOException, + InterruptedException { + org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl trr = + new org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl(); + Scan s = new Scan(); + s.setStartRow("aaa".getBytes()); + s.setStopRow("zzz".getBytes()); + s.addFamily(FAMILY); + trr.setScan(s); + trr.setHTable(table); + + trr.initialize(null, null); + Result r = new Result(); + ImmutableBytesWritable key = new ImmutableBytesWritable(); + + boolean more = trr.nextKeyValue(); + assertTrue(more); + key = trr.getCurrentKey(); + r = trr.getCurrentValue(); + checkResult(r, key, "aaa".getBytes(), "value aaa".getBytes()); + + more = trr.nextKeyValue(); + assertTrue(more); + key = trr.getCurrentKey(); + r = trr.getCurrentValue(); + checkResult(r, key, "bbb".getBytes(), "value bbb".getBytes()); + + // no more data + more = trr.nextKeyValue(); + assertFalse(more); + } + + /** + * Create a table that IOE's on first scanner next call + * + * @throws IOException + */ + static Table createIOEScannerTable(byte[] name, final int failCnt) + throws IOException { + // build up a mock scanner stuff to fail the first time + Answer a = new Answer() { + int cnt = 0; + + @Override + public ResultScanner answer(InvocationOnMock invocation) throws Throwable { + // first invocation return the busted mock scanner + if (cnt++ < failCnt) { + // create mock ResultScanner that always fails. + Scan scan = mock(Scan.class); + doReturn("bogus".getBytes()).when(scan).getStartRow(); // avoid npe + ResultScanner scanner = mock(ResultScanner.class); + // simulate TimeoutException / IOException + doThrow(new IOException("Injected exception")).when(scanner).next(); + return scanner; + } + + // otherwise return the real scanner. 
+ return (ResultScanner) invocation.callRealMethod(); + } + }; + + Table htable = spy(createTable(name)); + doAnswer(a).when(htable).getScanner((Scan) anyObject()); + return htable; + } + + /** + * Create a table that throws a DoNotRetryIOException on first scanner next + * call + * + * @throws IOException + */ + static Table createDNRIOEScannerTable(byte[] name, final int failCnt) + throws IOException { + // build up a mock scanner stuff to fail the first time + Answer a = new Answer() { + int cnt = 0; + + @Override + public ResultScanner answer(InvocationOnMock invocation) throws Throwable { + // first invocation return the busted mock scanner + if (cnt++ < failCnt) { + // create mock ResultScanner that always fails. + Scan scan = mock(Scan.class); + doReturn("bogus".getBytes()).when(scan).getStartRow(); // avoid npe + ResultScanner scanner = mock(ResultScanner.class); + + invocation.callRealMethod(); // simulate UnknownScannerException + doThrow( + new UnknownScannerException("Injected simulated TimeoutException")) + .when(scanner).next(); + return scanner; + } + + // otherwise return the real scanner. + return (ResultScanner) invocation.callRealMethod(); + } + }; + + Table htable = spy(createTable(name)); + doAnswer(a).when(htable).getScanner((Scan) anyObject()); + return htable; + } + + /** + * Run test assuming no errors using newer mapreduce api + * + * @throws IOException + * @throws InterruptedException + */ + @Test + public void testTableRecordReaderMapreduce() throws IOException, + InterruptedException { + Table table = createTable("table1-mr".getBytes()); + runTestMapreduce(table); + } + + /** + * Run test assuming Scanner IOException failure using newer mapreduce api + * + * @throws IOException + * @throws InterruptedException + */ + @Test + public void testTableRecordReaderScannerFailMapreduce() throws IOException, + InterruptedException { + Table htable = createIOEScannerTable("table2-mr".getBytes(), 1); + runTestMapreduce(htable); + } + + /** + * Run test assuming Scanner IOException failure using newer mapreduce api + * + * @throws IOException + * @throws InterruptedException + */ + @Test(expected = IOException.class) + public void testTableRecordReaderScannerFailMapreduceTwice() throws IOException, + InterruptedException { + Table htable = createIOEScannerTable("table3-mr".getBytes(), 2); + runTestMapreduce(htable); + } + + /** + * Run test assuming UnknownScannerException (which is a type of + * DoNotRetryIOException) using newer mapreduce api + * + * @throws InterruptedException + * @throws org.apache.hadoop.hbase.DoNotRetryIOException + */ + @Test + public void testTableRecordReaderScannerTimeoutMapreduce() + throws IOException, InterruptedException { + Table htable = createDNRIOEScannerTable("table4-mr".getBytes(), 1); + runTestMapreduce(htable); + } + + /** + * Run test assuming UnknownScannerException (which is a type of + * DoNotRetryIOException) using newer mapreduce api + * + * @throws InterruptedException + * @throws org.apache.hadoop.hbase.DoNotRetryIOException + */ + @Test(expected = org.apache.hadoop.hbase.DoNotRetryIOException.class) + public void testTableRecordReaderScannerTimeoutMapreduceTwice() + throws IOException, InterruptedException { + Table htable = createDNRIOEScannerTable("table5-mr".getBytes(), 2); + runTestMapreduce(htable); + } + + /** + * Verify the example we present in javadocs on TableInputFormatBase + */ + @Test + public void testExtensionOfTableInputFormatBase() + throws IOException, InterruptedException, ClassNotFoundException { + 
LOG.info("testing use of an InputFormat taht extends InputFormatBase"); + final Table htable = createTable(Bytes.toBytes("exampleTable"), + new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); + testInputFormat(ExampleTIF.class); + } + + @Test + public void testJobConfigurableExtensionOfTableInputFormatBase() + throws IOException, InterruptedException, ClassNotFoundException { + LOG.info("testing use of an InputFormat taht extends InputFormatBase, " + + "using JobConfigurable."); + final Table htable = createTable(Bytes.toBytes("exampleJobConfigurableTable"), + new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); + testInputFormat(ExampleJobConfigurableTIF.class); + } + + @Test + public void testDeprecatedExtensionOfTableInputFormatBase() + throws IOException, InterruptedException, ClassNotFoundException { + LOG.info("testing use of an InputFormat taht extends InputFormatBase, " + + "using the approach documented in 0.98."); + final Table htable = createTable(Bytes.toBytes("exampleDeprecatedTable"), + new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); + testInputFormat(ExampleDeprecatedTIF.class); + } + + void testInputFormat(Class clazz) + throws IOException, InterruptedException, ClassNotFoundException { + final Job job = MapreduceTestingShim.createJob(UTIL.getConfiguration()); + job.setInputFormatClass(clazz); + job.setOutputFormatClass(NullOutputFormat.class); + job.setMapperClass(ExampleVerifier.class); + job.setNumReduceTasks(0); + + LOG.debug("submitting job."); + assertTrue("job failed!", job.waitForCompletion(true)); + assertEquals("Saw the wrong number of instances of the filtered-for row.", 2, job.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":row", "aaa").getValue()); + assertEquals("Saw any instances of the filtered out row.", 0, job.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":row", "bbb").getValue()); + assertEquals("Saw the wrong number of instances of columnA.", 1, job.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":family", "columnA").getValue()); + assertEquals("Saw the wrong number of instances of columnB.", 1, job.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":family", "columnB").getValue()); + assertEquals("Saw the wrong count of values for the filtered-for row.", 2, job.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":value", "value aaa").getValue()); + assertEquals("Saw the wrong count of values for the filtered-out row.", 0, job.getCounters() + .findCounter(TestTableInputFormat.class.getName() + ":value", "value bbb").getValue()); + } + + public static class ExampleVerifier extends TableMapper { + + @Override + public void map(ImmutableBytesWritable key, Result value, Context context) + throws IOException { + for (Cell cell : value.listCells()) { + context.getCounter(TestTableInputFormat.class.getName() + ":row", + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) + .increment(1l); + context.getCounter(TestTableInputFormat.class.getName() + ":family", + Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())) + .increment(1l); + context.getCounter(TestTableInputFormat.class.getName() + ":value", + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())) + .increment(1l); + } + } + + } + + public static class ExampleDeprecatedTIF extends TableInputFormatBase implements JobConfigurable { + + @Override + public void 
configure(JobConf job) { + try { + Connection connection = ConnectionFactory.createConnection(job); + Table exampleTable = connection.getTable(TableName.valueOf(("exampleDeprecatedTable"))); + // mandatory + initializeTable(connection, exampleTable.getName()); + byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), + Bytes.toBytes("columnB") }; + // optional + Scan scan = new Scan(); + for (byte[] family : inputColumns) { + scan.addFamily(family); + } + Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*")); + scan.setFilter(exampleFilter); + setScan(scan); + } catch (IOException exception) { + throw new RuntimeException("Failed to configure for job.", exception); + } + } + + } + + + public static class ExampleJobConfigurableTIF extends TableInputFormatBase + implements JobConfigurable { + + @Override + public void configure(JobConf job) { + try { + Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job)); + TableName tableName = TableName.valueOf("exampleJobConfigurableTable"); + // mandatory + initializeTable(connection, tableName); + byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), + Bytes.toBytes("columnB") }; + //optional + Scan scan = new Scan(); + for (byte[] family : inputColumns) { + scan.addFamily(family); + } + Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*")); + scan.setFilter(exampleFilter); + setScan(scan); + } catch (IOException exception) { + throw new RuntimeException("Failed to initialize.", exception); + } + } + } + + + public static class ExampleTIF extends TableInputFormatBase { + + @Override + protected void initialize(JobContext job) throws IOException { + Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create( + job.getConfiguration())); + TableName tableName = TableName.valueOf("exampleTable"); + // mandatory + initializeTable(connection, tableName); + byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), + Bytes.toBytes("columnB") }; + //optional + Scan scan = new Scan(); + for (byte[] family : inputColumns) { + scan.addFamily(family); + } + Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*")); + scan.setFilter(exampleFilter); + setScan(scan); + } + + } +} + diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index e21e951b6b6..cb92a7425af 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -66,6 +66,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsReques import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRequest; @@ -97,6 +99,7 @@ import 
org.apache.hadoop.hbase.regionserver.FlushRequester; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HeapMemoryManager; import org.apache.hadoop.hbase.regionserver.Leases; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerAccounting; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.ServerNonceManager; @@ -253,12 +256,12 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { } @Override - public void addToOnlineRegions(HRegion r) { + public void addToOnlineRegions(Region r) { // TODO Auto-generated method stub } @Override - public boolean removeFromOnlineRegions(HRegion r, ServerName destination) { + public boolean removeFromOnlineRegions(Region r, ServerName destination) { // TODO Auto-generated method stub return false; } @@ -332,8 +335,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { } @Override - public void postOpenDeployTasks(HRegion r) - throws KeeperException, IOException { + public void postOpenDeployTasks(Region r) throws KeeperException, IOException { // TODO Auto-generated method stub } @@ -459,6 +461,12 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { return null; } + @Override + public WarmupRegionResponse warmupRegion(RpcController controller, + WarmupRegionRequest request) throws ServiceException { + //TODO Auto-generated method stub + return null; + } @Override public CloseRegionResponse closeRegion(RpcController controller, CloseRegionRequest request) throws ServiceException { @@ -523,7 +531,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { } @Override - public List getOnlineRegions(TableName tableName) throws IOException { + public List getOnlineRegions(TableName tableName) throws IOException { // TODO Auto-generated method stub return null; } @@ -575,7 +583,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { } @Override - public Map getRecoveringRegions() { + public Map getRecoveringRegions() { // TODO Auto-generated method stub return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java index f482dfa837a..abeab3febae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentListener.java @@ -34,17 +34,15 @@ import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; - import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; - import org.junit.experimental.categories.Category; @Category({MasterTests.class, MediumTests.class}) @@ -233,8 +231,8 @@ public class TestAssignmentListener { admin.majorCompact(TABLE_NAME); mergeable = 0; for (JVMClusterUtil.RegionServerThread regionThread: miniCluster.getRegionServerThreads()) { - for (HRegion region: 
regionThread.getRegionServer().getOnlineRegions(TABLE_NAME)) { - mergeable += region.isMergeable() ? 1 : 0; + for (Region region: regionThread.getRegionServer().getOnlineRegions(TABLE_NAME)) { + mergeable += ((HRegion)region).isMergeable() ? 1 : 0; } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 8ed49ff5cf4..00cad06d586 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -63,6 +63,8 @@ import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLog import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; @@ -225,9 +227,10 @@ public class TestCatalogJanitor { } @Override - public void createTable(HTableDescriptor desc, byte[][] splitKeys) + public long createTable(HTableDescriptor desc, byte[][] splitKeys) throws IOException { // no-op + return -1; } @Override @@ -260,6 +263,11 @@ public class TestCatalogJanitor { return null; } + @Override + public ProcedureExecutor getMasterProcedureExecutor() { + return null; + } + @Override public ServerManager getServerManager() { return null; @@ -420,7 +428,9 @@ public class TestCatalogJanitor { } @Override - public void deleteTable(TableName tableName) throws IOException { } + public long deleteTable(TableName tableName) throws IOException { + return -1; + } @Override public void truncateTable(TableName tableName, boolean preserveSplits) throws IOException { } @@ -431,10 +441,14 @@ public class TestCatalogJanitor { throws IOException { } @Override - public void enableTable(TableName tableName) throws IOException { } + public long enableTable(TableName tableName) throws IOException { + return -1; + } @Override - public void disableTable(TableName tableName) throws IOException { } + public long disableTable(TableName tableName) throws IOException { + return -1; + } @Override public void addColumn(TableName tableName, HColumnDescriptor column) @@ -912,7 +926,7 @@ public class TestCatalogJanitor { MasterServices services = new MockMasterServices(server); // create the janitor - + CatalogJanitor janitor = new CatalogJanitor(server, services); // Create regions. 
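The MockMasterServices updates above follow the master's table DDL moving onto the procedure framework: createTable, deleteTable, enableTable and disableTable now return a long (the id of the procedure that performs the operation) instead of void, and getMasterProcedureExecutor() exposes the executor that runs those procedures. Below is a minimal sketch of how that contract is exercised, assembled only from helpers that appear later in this patch (MasterProcedureTestingUtility.createHTD, ModifyRegionUtils.createHRegionInfos, ProcedureTestingUtility.submitAndWait / assertProcNotFailed, CreateTableProcedure); the master handle, table name and family used here are illustrative, not part of the patch.

    // Submit a CreateTableProcedure through the master's procedure executor and
    // verify it finished without error, mirroring MasterProcedureTestingUtility.createTable().
    ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
    HTableDescriptor htd =
        MasterProcedureTestingUtility.createHTD(TableName.valueOf("exampleProcTable"), "f1");
    HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null); // null split keys: single region
    long procId = ProcedureTestingUtility.submitAndWait(procExec,
        new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
    ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId));

In the mock above the same methods simply return -1 and null, which is enough for tests that never inspect the procedure result.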
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java index dd733ad3257..a19d5d86edd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestClockSkewDetection.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; @@ -103,7 +104,11 @@ public class TestClockSkewDetection { LOG.debug("regionServerStartup 1"); InetAddress ia1 = InetAddress.getLocalHost(); - sm.regionServerStartup(ia1, 1234, -1, System.currentTimeMillis()); + RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder(); + request.setPort(1234); + request.setServerStartCode(-1); + request.setServerCurrentTime(System.currentTimeMillis()); + sm.regionServerStartup(request.build(), ia1); final Configuration c = HBaseConfiguration.create(); long maxSkew = c.getLong("hbase.master.maxclockskew", 30000); @@ -114,7 +119,11 @@ public class TestClockSkewDetection { LOG.debug("Test: Master Time > Region Server Time"); LOG.debug("regionServerStartup 2"); InetAddress ia2 = InetAddress.getLocalHost(); - sm.regionServerStartup(ia2, 1235, -1, System.currentTimeMillis() - maxSkew * 2); + request = RegionServerStartupRequest.newBuilder(); + request.setPort(1235); + request.setServerStartCode(-1); + request.setServerCurrentTime(System.currentTimeMillis() - maxSkew * 2); + sm.regionServerStartup(request.build(), ia2); fail("HMaster should have thrown a ClockOutOfSyncException but didn't."); } catch(ClockOutOfSyncException e) { //we want an exception @@ -126,7 +135,11 @@ public class TestClockSkewDetection { LOG.debug("Test: Master Time < Region Server Time"); LOG.debug("regionServerStartup 3"); InetAddress ia3 = InetAddress.getLocalHost(); - sm.regionServerStartup(ia3, 1236, -1, System.currentTimeMillis() + maxSkew * 2); + request = RegionServerStartupRequest.newBuilder(); + request.setPort(1236); + request.setServerStartCode(-1); + request.setServerCurrentTime(System.currentTimeMillis() + maxSkew * 2); + sm.regionServerStartup(request.build(), ia3); fail("HMaster should have thrown a ClockOutOfSyncException but didn't."); } catch (ClockOutOfSyncException e) { // we want an exception @@ -136,12 +149,20 @@ public class TestClockSkewDetection { // make sure values above warning threshold but below max threshold don't kill LOG.debug("regionServerStartup 4"); InetAddress ia4 = InetAddress.getLocalHost(); - sm.regionServerStartup(ia4, 1237, -1, System.currentTimeMillis() - warningSkew * 2); + request = RegionServerStartupRequest.newBuilder(); + request.setPort(1237); + request.setServerStartCode(-1); + request.setServerCurrentTime(System.currentTimeMillis() - warningSkew * 2); + sm.regionServerStartup(request.build(), ia4); // make sure values above warning threshold but below max threshold don't kill LOG.debug("regionServerStartup 5"); InetAddress ia5 = InetAddress.getLocalHost(); - sm.regionServerStartup(ia5, 1238, -1, System.currentTimeMillis() + 
warningSkew * 2); + request = RegionServerStartupRequest.newBuilder(); + request.setPort(1238); + request.setServerStartCode(-1); + request.setServerCurrentTime(System.currentTimeMillis() + warningSkew * 2); + sm.regionServerStartup(request.build(), ia5); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java index 0038d71befa..8a16c0df8d4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java @@ -90,6 +90,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -1479,7 +1480,7 @@ public class TestDistributedLogSplitting { } LOG.debug("adding data to rs = " + rst.getName() + " region = "+ hri.getRegionNameAsString()); - HRegion region = hrs.getOnlineRegion(hri.getRegionName()); + Region region = hrs.getOnlineRegion(hri.getRegionName()); assertTrue(region != null); putData(region, hri.getStartKey(), nrows, Bytes.toBytes("q"), family); } @@ -1500,7 +1501,7 @@ public class TestDistributedLogSplitting { } LOG.debug("adding data to rs = " + mt.getName() + " region = "+ hri.getRegionNameAsString()); - HRegion region = hrs.getOnlineRegion(hri.getRegionName()); + Region region = hrs.getOnlineRegion(hri.getRegionName()); assertTrue(region != null); putData(region, hri.getStartKey(), nrows, Bytes.toBytes("q"), family); } @@ -1614,7 +1615,7 @@ public class TestDistributedLogSplitting { TEST_UTIL.waitUntilNoRegionsInTransition(60000); } - private void putData(HRegion region, byte[] startRow, int numRows, byte [] qf, + private void putData(Region region, byte[] startRow, int numRows, byte [] qf, byte [] ...families) throws IOException { for(int i = 0; i < numRows; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java index 0f7c281b298..5e6bff809ca 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java @@ -31,8 +31,9 @@ import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; @@ -76,24 +77,30 @@ public class TestGetLastFlushedSequenceId { table.flushCommits(); MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster(); List rsts = 
cluster.getRegionServerThreads(); - HRegion region = null; + Region region = null; for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) { HRegionServer hrs = rsts.get(i).getRegionServer(); - for (HRegion r : hrs.getOnlineRegions(tableName)) { + for (Region r : hrs.getOnlineRegions(tableName)) { region = r; break; } } assertNotNull(region); Thread.sleep(2000); - assertEquals( - HConstants.NO_SEQNUM, - testUtil.getHBaseCluster().getMaster() - .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes())); + RegionStoreSequenceIds ids = + testUtil.getHBaseCluster().getMaster() + .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes()); + assertEquals(HConstants.NO_SEQNUM, ids.getLastFlushedSequenceId()); + long storeSequenceId = ids.getStoreSequenceId(0).getSequenceId(); + assertTrue(storeSequenceId > 0); testUtil.getHBaseAdmin().flush(tableName); Thread.sleep(2000); - assertTrue(testUtil.getHBaseCluster().getMaster() - .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes()) > 0); + ids = + testUtil.getHBaseCluster().getMaster() + .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes()); + assertTrue(ids.getLastFlushedSequenceId() + " > " + storeSequenceId, + ids.getLastFlushedSequenceId() > storeSequenceId); + assertEquals(ids.getLastFlushedSequenceId(), ids.getStoreSequenceId(0).getSequenceId()); table.close(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java index 2419918a34b..37d69409ab2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHMasterRPCException.java @@ -19,15 +19,18 @@ package org.apache.hadoop.hbase.master; -import static org.junit.Assert.fail; +import static org.junit.Assert.assertTrue; import java.io.IOException; -import java.net.SocketTimeoutException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.CoordinatedStateManagerFactory; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcClientFactory; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -36,60 +39,80 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRe import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; +import org.junit.After; +import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; import com.google.protobuf.BlockingRpcChannel; import com.google.protobuf.ServiceException; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestHMasterRPCException { - @Test - public void testRPCException() throws Exception { - HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - 
TEST_UTIL.startMiniZKCluster(); - Configuration conf = TEST_UTIL.getConfiguration(); + private static final Log LOG = LogFactory.getLog(TestHMasterRPCException.class); + + private final HBaseTestingUtility testUtil = HBaseTestingUtility.createLocalHTU(); + + private HMaster master; + + private RpcClient rpcClient; + + @Before + public void setUp() throws Exception { + Configuration conf = testUtil.getConfiguration(); conf.set(HConstants.MASTER_PORT, "0"); + conf.setInt(HConstants.ZK_SESSION_TIMEOUT, 2000); + testUtil.startMiniZKCluster(); + CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager(conf); - HMaster hm = new HMaster(conf, cp); - ServerName sm = hm.getServerName(); - RpcClient rpcClient = RpcClientFactory.createClient(conf, HConstants.CLUSTER_ID_DEFAULT); - try { - int i = 0; - //retry the RPC a few times; we have seen SocketTimeoutExceptions if we - //try to connect too soon. Retry on SocketTimeoutException. - while (i < 20) { - try { - BlockingRpcChannel channel = - rpcClient.createBlockingRpcChannel(sm, User.getCurrent(), 0); - MasterProtos.MasterService.BlockingInterface stub = - MasterProtos.MasterService.newBlockingStub(channel); - stub.isMasterRunning(null, IsMasterRunningRequest.getDefaultInstance()); - fail(); - } catch (ServiceException ex) { - IOException ie = ProtobufUtil.getRemoteException(ex); - if (!(ie instanceof SocketTimeoutException)) { - if (ie.getMessage().startsWith("org.apache.hadoop.hbase.ipc." + - "ServerNotRunningYetException: Server is not running yet")) { - // Done. Got the exception we wanted. - System.out.println("Expected exception: " + ie.getMessage()); - return; - } else { - throw ex; - } - } else { - System.err.println("Got SocketTimeoutException. Will retry. "); - } - } catch (Throwable t) { - fail("Unexpected throwable: " + t); - } - Thread.sleep(100); - i++; - } - fail(); - } finally { + ZooKeeperWatcher watcher = testUtil.getZooKeeperWatcher(); + ZKUtil.createWithParents(watcher, watcher.getMasterAddressZNode(), Bytes.toBytes("fake:123")); + master = new HMaster(conf, cp); + rpcClient = RpcClientFactory.createClient(conf, HConstants.CLUSTER_ID_DEFAULT); + } + + @After + public void tearDown() throws IOException { + if (rpcClient != null) { rpcClient.close(); } + if (master != null) { + master.stopMaster(); + } + testUtil.shutdownMiniZKCluster(); } -} \ No newline at end of file + + @Test + public void testRPCException() throws IOException, InterruptedException, KeeperException { + ServerName sm = master.getServerName(); + boolean fakeZNodeDelete = false; + for (int i = 0; i < 20; i++) { + try { + BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sm, User.getCurrent(), 0); + MasterProtos.MasterService.BlockingInterface stub = + MasterProtos.MasterService.newBlockingStub(channel); + assertTrue(stub.isMasterRunning(null, IsMasterRunningRequest.getDefaultInstance()) + .getIsMasterRunning()); + return; + } catch (ServiceException ex) { + IOException ie = ProtobufUtil.getRemoteException(ex); + // No SocketTimeoutException here. RpcServer is already started after the construction of + // HMaster. 
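+        // The fake master address znode created in setUp() keeps this HMaster from finishing active-master startup, so these early calls fail with ServerNotRunningYetException; deleting that znode below lets the master come up, after which isMasterRunning() returns true.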
+ assertTrue(ie.getMessage().startsWith( + "org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet")); + LOG.info("Expected exception: ", ie); + if (!fakeZNodeDelete) { + testUtil.getZooKeeperWatcher().getRecoverableZooKeeper() + .delete(testUtil.getZooKeeperWatcher().getMasterAddressZNode(), -1); + fakeZNodeDelete = true; + } + } + Thread.sleep(1000); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java index f0c6bc2c6dc..3daa711c34f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.FlakeyTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; @@ -383,9 +384,9 @@ public class TestMasterFailover { // region server should expire (how it can be verified?) MetaTableLocator.setMetaLocation(activeMaster.getZooKeeper(), rs.getServerName(), State.PENDING_OPEN); - HRegion meta = rs.getFromOnlineRegions(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()); + Region meta = rs.getFromOnlineRegions(HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()); rs.removeFromOnlineRegions(meta, null); - meta.close(); + ((HRegion)meta).close(); log("Aborting master"); activeMaster.abort("test-kill"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java index 25dd13e67c8..a00741f1eab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java @@ -42,24 +42,21 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.MetaScanner; -import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper; import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer; import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan; import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan.Position; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import 
org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -192,10 +189,10 @@ public class TestRegionPlacement { // kill a random non-meta server carrying at least one region killIndex = random.nextInt(SLAVES); serverToKill = TEST_UTIL.getHBaseCluster().getRegionServer(killIndex).getServerName(); - Collection regs = + Collection regs = TEST_UTIL.getHBaseCluster().getRegionServer(killIndex).getOnlineRegionsLocalContext(); isNamespaceServer = false; - for (HRegion r : regs) { + for (Region r : regs) { if (r.getRegionInfo().getTable().getNamespaceAsString() .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { isNamespaceServer = true; @@ -419,8 +416,7 @@ public class TestRegionPlacement { MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); for (int i = 0; i < SLAVES; i++) { HRegionServer rs = cluster.getRegionServer(i); - for (HRegion region: rs.getOnlineRegions( - TableName.valueOf("testRegionAssignment"))) { + for (Region region: rs.getOnlineRegions(TableName.valueOf("testRegionAssignment"))) { InetSocketAddress[] favoredSocketAddress = rs.getFavoredNodesForRegion( region.getRegionInfo().getEncodedName()); List favoredServerList = plan.getAssignmentMap().get(region.getRegionInfo()); @@ -449,7 +445,7 @@ public class TestRegionPlacement { assertNotNull(addrFromPlan); assertTrue("Region server " + rs.getServerName().getHostAndPort() + " has the " + positions[j] + - " for region " + region.getRegionNameAsString() + " is " + + " for region " + region.getRegionInfo().getRegionNameAsString() + " is " + addrFromRS + " which is inconsistent with the plan " + addrFromPlan, addrFromRS.equals(addrFromPlan)); } @@ -470,11 +466,11 @@ public class TestRegionPlacement { final AtomicInteger regionOnPrimaryNum = new AtomicInteger(0); final AtomicInteger totalRegionNum = new AtomicInteger(0); LOG.info("The start of region placement verification"); - MetaScannerVisitor visitor = new MetaScannerVisitor() { - public boolean processRow(Result result) throws IOException { + MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { + public boolean visit(Result result) throws IOException { try { @SuppressWarnings("deprecation") - HRegionInfo info = MetaScanner.getHRegionInfo(result); + HRegionInfo info = MetaTableAccessor.getHRegionInfo(result); if(info.getTable().getNamespaceAsString() .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) { return true; @@ -522,11 +518,8 @@ public class TestRegionPlacement { throw e; } } - - @Override - public void close() throws IOException {} }; - MetaScanner.metaScan(CONNECTION, visitor); + MetaTableAccessor.fullScanRegions(CONNECTION, visitor); LOG.info("There are " + regionOnPrimaryNum.intValue() + " out of " + totalRegionNum.intValue() + " regions running on the primary" + " region servers" ); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java index b51f7c710f4..692b5a0e1a8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java @@ -27,15 +27,14 @@ import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import 
org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.MetaScanner; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; @@ -75,8 +74,7 @@ public class TestRestartCluster { UTIL.waitTableEnabled(TABLE); } - List allRegions = - MetaScanner.listAllRegions(UTIL.getConfiguration(), UTIL.getConnection(), true); + List allRegions = MetaTableAccessor.getAllRegions(UTIL.getConnection(), false); assertEquals(4, allRegions.size()); LOG.info("\n\nShutting down cluster"); @@ -91,8 +89,7 @@ public class TestRestartCluster { // Need to use a new 'Configuration' so we make a new HConnection. // Otherwise we're reusing an HConnection that has gone stale because // the shutdown of the cluster also called shut of the connection. - allRegions = MetaScanner - .listAllRegions(new Configuration(UTIL.getConfiguration()), UTIL.getConnection(), true); + allRegions = MetaTableAccessor.getAllRegions(UTIL.getConnection(), false); assertEquals(4, allRegions.size()); LOG.info("\n\nWaiting for tables to be available"); for(TableName TABLE: TABLES) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java index 55b1e1f1d48..44b98033070 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java @@ -103,37 +103,6 @@ public class TestTableLockManager { TEST_UTIL.shutdownMiniCluster(); } - @Test(timeout = 600000) - public void testLockTimeoutException() throws Exception { - Configuration conf = TEST_UTIL.getConfiguration(); - conf.setInt(TableLockManager.TABLE_WRITE_LOCK_TIMEOUT_MS, 3000); - prepareMiniCluster(); - HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); - master.getMasterCoprocessorHost().load(TestLockTimeoutExceptionMasterObserver.class, - 0, TEST_UTIL.getConfiguration()); - - ExecutorService executor = Executors.newSingleThreadExecutor(); - Future shouldFinish = executor.submit(new Callable() { - @Override - public Object call() throws Exception { - Admin admin = TEST_UTIL.getHBaseAdmin(); - admin.deleteColumn(TABLE_NAME, FAMILY); - return null; - } - }); - - deleteColumn.await(); - - try { - Admin admin = TEST_UTIL.getHBaseAdmin(); - admin.addColumn(TABLE_NAME, new HColumnDescriptor(NEW_FAMILY)); - fail("Was expecting TableLockTimeoutException"); - } catch (LockTimeoutException ex) { - //expected - } - shouldFinish.get(); - } - public static class TestLockTimeoutExceptionMasterObserver extends BaseMasterObserver { @Override public void preDeleteColumnHandler(ObserverContext ctx, @@ -374,7 +343,7 @@ public class TestTableLockManager { try { HRegion region = TEST_UTIL.getSplittableRegion(tableName, -1); if (region != null) { - byte[] regionName = region.getRegionName(); + byte[] regionName = region.getRegionInfo().getRegionName(); admin.flushRegion(regionName); admin.compactRegion(regionName); admin.splitRegion(regionName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableStateManager.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableStateManager.java new file mode 100644 index 00000000000..e0e969ed23d --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableStateManager.java @@ -0,0 +1,82 @@ +/* + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master; + +import java.io.IOException; + +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Tests the default table lock manager + */ +@Category({ MasterTests.class, LargeTests.class }) +public class TestTableStateManager { + + private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + @After + public void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test(timeout = 60000) + public void testUpgradeFromZk() throws Exception { + TableName tableName = + TableName.valueOf("testUpgradeFromZk"); + TEST_UTIL.startMiniCluster(2, 1); + TEST_UTIL.shutdownMiniHBaseCluster(); + ZooKeeperWatcher watcher = TEST_UTIL.getZooKeeperWatcher(); + setTableStateInZK(watcher, tableName, ZooKeeperProtos.DeprecatedTableState.State.DISABLED); + TEST_UTIL.restartHBaseCluster(1); + + HMaster master = TEST_UTIL.getHBaseCluster().getMaster(); + Assert.assertEquals( + master.getTableStateManager().getTableState(tableName), + TableState.State.DISABLED); + } + + private void setTableStateInZK(ZooKeeperWatcher watcher, final TableName tableName, + final ZooKeeperProtos.DeprecatedTableState.State state) + throws KeeperException, IOException { + String znode = ZKUtil.joinZNode(watcher.tableZNode, tableName.getNameAsString()); + if (ZKUtil.checkExists(watcher, znode) == -1) { + ZKUtil.createAndFailSilent(watcher, znode); + } + ZooKeeperProtos.DeprecatedTableState.Builder builder = + ZooKeeperProtos.DeprecatedTableState.newBuilder(); + builder.setState(state); + byte[] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray()); + ZKUtil.setData(watcher, znode, data); + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java new file mode 100644 index 00000000000..bc45120bcfc --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java @@ -0,0 +1,164 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master; + +import static org.apache.hadoop.hbase.regionserver.HRegion.warmupHRegion; +import java.io.IOException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.experimental.categories.Category; +import org.junit.BeforeClass; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.After; +import org.junit.Test; + +/** + * Run tests that use the HBase clients; {@link HTable}. + * Sets up the HBase mini cluster once at start and runs through all client tests. + * Each creates a table named for the method and does its stuff against that. 
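+ * The single test here warms up a region via HRegion#warmupHRegion and then moves it back and forth between region servers (HBASE-4536).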
+ */ +@Category({MasterTests.class, LargeTests.class}) +@SuppressWarnings ("deprecation") +public class TestWarmupRegion { + final Log LOG = LogFactory.getLog(getClass()); + protected TableName TABLENAME = TableName.valueOf("testPurgeFutureDeletes"); + protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static byte [] ROW = Bytes.toBytes("testRow"); + private static byte [] FAMILY = Bytes.toBytes("testFamily"); + private static byte [] QUALIFIER = Bytes.toBytes("testQualifier"); + private static byte [] VALUE = Bytes.toBytes("testValue"); + private static byte[] COLUMN = Bytes.toBytes("column"); + private static int numRows = 10000; + protected static int SLAVES = 3; + private static MiniHBaseCluster myCluster; + private static Table table; + + /** + * @throws java.lang.Exception + */ + @BeforeClass + public static void setUpBeforeClass() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + TEST_UTIL.startMiniCluster(SLAVES); + } + + /** + * @throws java.lang.Exception + */ + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + /** + * @throws java.lang.Exception + */ + @Before + public void setUp() throws Exception { + table = TEST_UTIL.createTable(TABLENAME, FAMILY); + + // future timestamp + for (int i = 0; i < numRows; i++) { + long ts = System.currentTimeMillis() * 2; + Put put = new Put(ROW, ts); + put.add(FAMILY, COLUMN, VALUE); + table.put(put); + } + + // major compaction, purged future deletes + TEST_UTIL.getHBaseAdmin().flush(TABLENAME); + TEST_UTIL.getHBaseAdmin().majorCompact(TABLENAME); + + // waiting for the major compaction to complete + TEST_UTIL.waitFor(6000, new Waiter.Predicate() { + @Override + public boolean evaluate() throws IOException { + return TEST_UTIL.getHBaseAdmin().getCompactionState(TABLENAME) == + AdminProtos.GetRegionInfoResponse.CompactionState.NONE; + } + }); + + table.close(); + } + + + /** + * @throws java.lang.Exception + */ + @After + public void tearDown() throws Exception { + // Nothing to do. 
+ } + + protected void runwarmup() throws InterruptedException{ + Thread thread = new Thread(new Runnable() { + @Override + public void run() { + HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0); + HRegion region = TEST_UTIL.getMiniHBaseCluster().getRegions(TABLENAME).get(0); + HRegionInfo info = region.getRegionInfo(); + + try { + HTableDescriptor htd = table.getTableDescriptor(); + for (int i = 0; i < 10; i++) { + warmupHRegion(info, htd, rs.getWAL(info), rs.getConfiguration(), rs, null); + } + + } catch (IOException ie) { + LOG.error("Failed warming up region " + info.getRegionNameAsString(), ie); + } + } + }); + thread.start(); + thread.join(); + } + + /** + * Basic client side validation of HBASE-4536 + */ + @Test + public void testWarmup() throws Exception { + int serverid = 0; + HRegion region = TEST_UTIL.getMiniHBaseCluster().getRegions(TABLENAME).get(0); + HRegionInfo info = region.getRegionInfo(); + runwarmup(); + for (int i = 0; i < 10; i++) { + HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(serverid); + byte [] destName = Bytes.toBytes(rs.getServerName().toString()); + TEST_UTIL.getMiniHBaseCluster().getMaster().move(info.getEncodedNameAsBytes(), destName); + serverid = (serverid + 1) % 2; + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestEnableTableHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestEnableTableHandler.java index d3d62394485..f5c8b90d557 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestEnableTableHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestEnableTableHandler.java @@ -18,20 +18,36 @@ */ package org.apache.hadoop.hbase.master.handler; +import java.util.ArrayList; import java.util.List; +import java.io.IOException; +import java.util.concurrent.CountDownLatch; +import com.google.common.base.Predicate; +import com.google.common.collect.Iterables; +import com.google.common.collect.Iterators; +import com.google.common.collect.Lists; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -43,13 +59,7 @@ import org.junit.experimental.categories.Category; import static org.junit.Assert.assertEquals; import static 
org.junit.Assert.assertTrue; - -import java.io.IOException; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Table; +import static org.junit.Assert.fail; @Category({ MasterTests.class, MediumTests.class }) public class TestEnableTableHandler { @@ -59,6 +69,8 @@ public class TestEnableTableHandler { @Before public void setUp() throws Exception { + TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + MasterSyncObserver.class.getName()); TEST_UTIL.startMiniCluster(1); } @@ -81,7 +93,6 @@ public class TestEnableTableHandler { admin.enableTable(tableName); TEST_UTIL.waitTableEnabled(tableName); - // disable once more admin.disableTable(tableName); @@ -91,21 +102,36 @@ public class TestEnableTableHandler { rs.getRegionServer().stop("stop"); cluster.waitForRegionServerToStop(rs.getRegionServer().getServerName(), 10000); - EnableTableHandler handler = - new EnableTableHandler(m, tableName, m.getAssignmentManager(), m.getTableLockManager(), - true); - handler.prepare(); - handler.process(); + LOG.debug("Now enabling table " + tableName); + admin.enableTable(tableName); assertTrue(admin.isTableEnabled(tableName)); JVMClusterUtil.RegionServerThread rs2 = cluster.startRegionServer(); - m.getAssignmentManager().assign(admin.getTableRegions(tableName)); + cluster.waitForRegionServerToStart(rs2.getRegionServer().getServerName().getHostname(), + rs2.getRegionServer().getServerName().getPort(), 60000); + + List regions = TEST_UTIL.getHBaseAdmin().getTableRegions(tableName); + assertEquals(1, regions.size()); + for (HRegionInfo region : regions) { + TEST_UTIL.getHBaseAdmin().assign(region.getEncodedNameAsBytes()); + } + LOG.debug("Waiting for table assigned " + tableName); TEST_UTIL.waitUntilAllRegionsAssigned(tableName); List onlineRegions = admin.getOnlineRegions( rs2.getRegionServer().getServerName()); - assertEquals(1, onlineRegions.size()); - assertEquals(tableName, onlineRegions.get(0).getTable()); + ArrayList tableRegions = filterTableRegions(tableName, onlineRegions); + assertEquals(1, tableRegions.size()); + } + + private ArrayList filterTableRegions(final TableName tableName, + List onlineRegions) { + return Lists.newArrayList(Iterables.filter(onlineRegions, new Predicate() { + @Override + public boolean apply(HRegionInfo input) { + return input.getTable().equals(tableName); + } + })); } /** @@ -119,36 +145,118 @@ public class TestEnableTableHandler { public void testDeleteForSureClearsAllTableRowsFromMeta() throws IOException, InterruptedException { final TableName tableName = TableName.valueOf("testDeleteForSureClearsAllTableRowsFromMeta"); - final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); - final HMaster m = cluster.getMaster(); final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); final HTableDescriptor desc = new HTableDescriptor(tableName); desc.addFamily(new HColumnDescriptor(FAMILYNAME)); - admin.createTable(desc, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE); + try { + createTable(TEST_UTIL, desc, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE); + } catch (Exception e) { + e.printStackTrace(); + fail("Got an exception while creating " + tableName); + } // Now I have a nice table, mangle it by removing the HConstants.REGIONINFO_QUALIFIER_STR // content from a few of the rows. 
- Scan metaScannerForMyTable = MetaTableAccessor.getScanForTableName(tableName); try (Table metaTable = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) { - try (ResultScanner scanner = metaTable.getScanner(metaScannerForMyTable)) { + try (ResultScanner scanner = + metaTable.getScanner(MetaTableAccessor.getScanForTableName(TEST_UTIL.getConnection(), tableName))) { for (Result result : scanner) { // Just delete one row. Delete d = new Delete(result.getRow()); d.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); + LOG.info("Mangled: " + d); metaTable.delete(d); break; } } admin.disableTable(tableName); TEST_UTIL.waitTableDisabled(tableName.getName()); - // Presume this synchronous all is. - admin.deleteTable(tableName); + // Rely on the coprocessor based latch to make the operation synchronous. + try { + deleteTable(TEST_UTIL, tableName); + } catch (Exception e) { + e.printStackTrace(); + fail("Got an exception while deleting " + tableName); + } int rowCount = 0; - try (ResultScanner scanner = metaTable.getScanner(metaScannerForMyTable)) { + try (ResultScanner scanner = + metaTable.getScanner(MetaTableAccessor.getScanForTableName(TEST_UTIL.getConnection(), tableName))) { for (Result result : scanner) { + LOG.info("Found when none expected: " + result); rowCount++; } } assertEquals(0, rowCount); } } + + public static class MasterSyncObserver extends BaseMasterObserver { + volatile CountDownLatch tableCreationLatch = null; + volatile CountDownLatch tableDeletionLatch = null; + + @Override + public void postCreateTableHandler(final ObserverContext ctx, + HTableDescriptor desc, HRegionInfo[] regions) throws IOException { + // the AccessController test, some times calls only and directly the postCreateTableHandler() + if (tableCreationLatch != null) { + tableCreationLatch.countDown(); + } + } + + @Override + public void postDeleteTableHandler(final ObserverContext ctx, + TableName tableName) + throws IOException { + // the AccessController test, some times calls only and directly the postDeleteTableHandler() + if (tableDeletionLatch != null) { + tableDeletionLatch.countDown(); + } + } + } + + public static void createTable(HBaseTestingUtility testUtil, HTableDescriptor htd, + byte [][] splitKeys) + throws Exception { + createTable(testUtil, testUtil.getHBaseAdmin(), htd, splitKeys); + } + + public static void createTable(HBaseTestingUtility testUtil, HBaseAdmin admin, + HTableDescriptor htd, byte [][] splitKeys) + throws Exception { + // NOTE: We need a latch because admin is not sync, + // so the postOp coprocessor method may be called after the admin operation returned. 
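+    // The observer was registered on the master in setUp() through CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY; arm its creation latch before the asynchronous createTable call and await it afterwards so this helper only returns once the post-create hook has run.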
+ MasterSyncObserver observer = (MasterSyncObserver)testUtil.getHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName()); + observer.tableCreationLatch = new CountDownLatch(1); + if (splitKeys != null) { + admin.createTable(htd, splitKeys); + } else { + admin.createTable(htd); + } + observer.tableCreationLatch.await(); + observer.tableCreationLatch = null; + testUtil.waitUntilAllRegionsAssigned(htd.getTableName()); + } + + public static void deleteTable(HBaseTestingUtility testUtil, TableName tableName) + throws Exception { + deleteTable(testUtil, testUtil.getHBaseAdmin(), tableName); + } + + public static void deleteTable(HBaseTestingUtility testUtil, HBaseAdmin admin, + TableName tableName) + throws Exception { + // NOTE: We need a latch because admin is not sync, + // so the postOp coprocessor method may be called after the admin operation returned. + MasterSyncObserver observer = (MasterSyncObserver)testUtil.getHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName()); + observer.tableDeletionLatch = new CountDownLatch(1); + try { + admin.disableTable(tableName); + } catch (Exception e) { + LOG.debug("Table: " + tableName + " already disabled, so just deleting it."); + } + admin.deleteTable(tableName); + observer.tableDeletionLatch.await(); + observer.tableDeletionLatch = null; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java index 5b2f4f6fcf7..b5c82e1a006 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDeleteFamilyHandler.java @@ -29,20 +29,22 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.wal.WALSplitter; +import org.junit.After; import org.junit.AfterClass; +import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; @@ -64,10 +66,17 @@ public class TestTableDeleteFamilyHandler { */ @BeforeClass public static void beforeAllTests() throws Exception { - TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true); TEST_UTIL.startMiniCluster(2); + } + @AfterClass + public static void afterAllTests() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void setup() throws IOException, InterruptedException { // Create a table of three families. This will assign a region. 
TEST_UTIL.createTable(TABLENAME, FAMILIES); Table t = TEST_UTIL.getConnection().getTable(TABLENAME); @@ -86,22 +95,17 @@ public class TestTableDeleteFamilyHandler { TEST_UTIL.flush(); t.close(); - } - @AfterClass - public static void afterAllTests() throws Exception { - TEST_UTIL.deleteTable(TABLENAME); - TEST_UTIL.shutdownMiniCluster(); - } - - @Before - public void setup() throws IOException, InterruptedException { TEST_UTIL.ensureSomeRegionServersAvailable(2); } + @After + public void cleanup() throws Exception { + TEST_UTIL.deleteTable(TABLENAME); + } + @Test public void deleteColumnFamilyWithMultipleRegions() throws Exception { - Admin admin = TEST_UTIL.getHBaseAdmin(); HTableDescriptor beforehtd = admin.getTableDescriptor(TABLENAME); @@ -114,7 +118,6 @@ public class TestTableDeleteFamilyHandler { assertEquals(3, beforehtd.getColumnFamilies().length); HColumnDescriptor[] families = beforehtd.getColumnFamilies(); for (int i = 0; i < families.length; i++) { - assertTrue(families[i].getNameAsString().equals("cf" + (i + 1))); } @@ -179,4 +182,95 @@ public class TestTableDeleteFamilyHandler { } } + @Test + public void deleteColumnFamilyTwice() throws Exception { + + Admin admin = TEST_UTIL.getHBaseAdmin(); + HTableDescriptor beforehtd = admin.getTableDescriptor(TABLENAME); + String cfToDelete = "cf1"; + + FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem(); + + // 1 - Check if table exists in descriptor + assertTrue(admin.isTableAvailable(TABLENAME)); + + // 2 - Check if all the target column family exist in descriptor + HColumnDescriptor[] families = beforehtd.getColumnFamilies(); + Boolean foundCF = false; + int i; + for (i = 0; i < families.length; i++) { + if (families[i].getNameAsString().equals(cfToDelete)) { + foundCF = true; + break; + } + } + assertTrue(foundCF); + + // 3 - Check if table exists in FS + Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), TABLENAME); + assertTrue(fs.exists(tableDir)); + + // 4 - Check if all the target column family exist in FS + FileStatus[] fileStatus = fs.listStatus(tableDir); + foundCF = false; + for (i = 0; i < fileStatus.length; i++) { + if (fileStatus[i].isDirectory() == true) { + FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() { + @Override + public boolean accept(Path p) { + if (p.getName().contains(HConstants.RECOVERED_EDITS_DIR)) { + return false; + } + return true; + } + }); + for (int j = 0; j < cf.length; j++) { + if (cf[j].isDirectory() == true && cf[j].getPath().getName().equals(cfToDelete)) { + foundCF = true; + break; + } + } + } + if (foundCF) { + break; + } + } + assertTrue(foundCF); + + // TEST - Disable and delete the column family + if (admin.isTableEnabled(TABLENAME)) { + admin.disableTable(TABLENAME); + } + admin.deleteColumn(TABLENAME, Bytes.toBytes(cfToDelete)); + + // 5 - Check if the target column family is gone from the FS + fileStatus = fs.listStatus(tableDir); + for (i = 0; i < fileStatus.length; i++) { + if (fileStatus[i].isDirectory() == true) { + FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() { + @Override + public boolean accept(Path p) { + if (WALSplitter.isSequenceIdFile(p)) { + return false; + } + return true; + } + }); + for (int j = 0; j < cf.length; j++) { + if (cf[j].isDirectory() == true) { + assertFalse(cf[j].getPath().getName().equals(cfToDelete)); + } + } + } + } + + try { + // Test: delete again + admin.deleteColumn(TABLENAME, Bytes.toBytes(cfToDelete)); + Assert.fail("Delete a non-exist column family should fail"); + } 
catch (InvalidFamilyOperationException e) { + // Expected. + } + } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java index 0d51875eb2e..c4772ab8136 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; @@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; import org.junit.AfterClass; +import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; @@ -52,7 +54,7 @@ import org.junit.rules.TestName; */ @Category({MasterTests.class, LargeTests.class}) public class TestTableDescriptorModification { - + @Rule public TestName name = new TestName(); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static TableName TABLE_NAME = null; @@ -74,7 +76,7 @@ public class TestTableDescriptorModification { TABLE_NAME = TableName.valueOf(name.getMethodName()); } - + @AfterClass public static void afterAllTests() throws Exception { TEST_UTIL.shutdownMiniCluster(); @@ -123,6 +125,95 @@ public class TestTableDescriptorModification { } } + @Test + public void testAddSameColumnFamilyTwice() throws IOException { + Admin admin = TEST_UTIL.getHBaseAdmin(); + // Create a table with one families + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + + // Modify the table removing one family and verify the descriptor + admin.addColumn(TABLE_NAME, new HColumnDescriptor(FAMILY_1)); + verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); + + try { + // Add same column family again - expect failure + admin.addColumn(TABLE_NAME, new HColumnDescriptor(FAMILY_1)); + Assert.fail("Delete a non-exist column family should fail"); + } catch (InvalidFamilyOperationException e) { + // Expected. 
+ } + + } finally { + admin.deleteTable(TABLE_NAME); + } + } + + @Test + public void testModifyColumnFamily() throws IOException { + Admin admin = TEST_UTIL.getHBaseAdmin(); + + HColumnDescriptor cfDescriptor = new HColumnDescriptor(FAMILY_0); + int blockSize = cfDescriptor.getBlocksize(); + // Create a table with one family + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(cfDescriptor); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + + int newBlockSize = 2 * blockSize; + cfDescriptor.setBlocksize(newBlockSize); + + // Modify column family + admin.modifyColumn(TABLE_NAME, cfDescriptor); + + HTableDescriptor htd = admin.getTableDescriptor(TABLE_NAME); + HColumnDescriptor hcfd = htd.getFamily(FAMILY_0); + assertTrue(hcfd.getBlocksize() == newBlockSize); + } finally { + admin.deleteTable(TABLE_NAME); + } + } + + @Test + public void testModifyNonExistingColumnFamily() throws IOException { + Admin admin = TEST_UTIL.getHBaseAdmin(); + + HColumnDescriptor cfDescriptor = new HColumnDescriptor(FAMILY_1); + int blockSize = cfDescriptor.getBlocksize(); + // Create a table with one family + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + + int newBlockSize = 2 * blockSize; + cfDescriptor.setBlocksize(newBlockSize); + + // Modify a column family that is not in the table. + try { + admin.modifyColumn(TABLE_NAME, cfDescriptor); + Assert.fail("Modify a non-exist column family should fail"); + } catch (InvalidFamilyOperationException e) { + // Expected. + } + + } finally { + admin.deleteTable(TABLE_NAME); + } + } + @Test public void testDeleteColumn() throws IOException { Admin admin = TEST_UTIL.getHBaseAdmin(); @@ -144,6 +235,35 @@ public class TestTableDescriptorModification { } } + @Test + public void testDeleteSameColumnFamilyTwice() throws IOException { + Admin admin = TEST_UTIL.getHBaseAdmin(); + // Create a table with two families + HTableDescriptor baseHtd = new HTableDescriptor(TABLE_NAME); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_0)); + baseHtd.addFamily(new HColumnDescriptor(FAMILY_1)); + admin.createTable(baseHtd); + admin.disableTable(TABLE_NAME); + try { + // Verify the table descriptor + verifyTableDescriptor(TABLE_NAME, FAMILY_0, FAMILY_1); + + // Modify the table removing one family and verify the descriptor + admin.deleteColumn(TABLE_NAME, FAMILY_1); + verifyTableDescriptor(TABLE_NAME, FAMILY_0); + + try { + // Delete again - expect failure + admin.deleteColumn(TABLE_NAME, FAMILY_1); + Assert.fail("Delete a non-exist column family should fail"); + } catch (Exception e) { + // Expected. + } + } finally { + admin.deleteTable(TABLE_NAME); + } + } + private void verifyTableDescriptor(final TableName tableName, final byte[]...
families) throws IOException { Admin admin = TEST_UTIL.getHBaseAdmin(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java new file mode 100644 index 00000000000..57a15e81a91 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java @@ -0,0 +1,408 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableDescriptor; +import org.apache.hadoop.hbase.client.BufferedMutator; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.TableStateManager; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.util.ModifyRegionUtils; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.MD5Hash; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public class MasterProcedureTestingUtility { + private static final Log LOG = LogFactory.getLog(MasterProcedureTestingUtility.class); + + private MasterProcedureTestingUtility() { + } + + public static HTableDescriptor createHTD(final TableName tableName, final String... 
family) { + HTableDescriptor htd = new HTableDescriptor(tableName); + for (int i = 0; i < family.length; ++i) { + htd.addFamily(new HColumnDescriptor(family[i])); + } + return htd; + } + + public static HRegionInfo[] createTable(final ProcedureExecutor procExec, + final TableName tableName, final byte[][] splitKeys, String... family) throws IOException { + HTableDescriptor htd = createHTD(tableName, family); + HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys); + long procId = ProcedureTestingUtility.submitAndWait(procExec, + new CreateTableProcedure(procExec.getEnvironment(), htd, regions)); + ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId)); + return regions; + } + + public static void validateTableCreation(final HMaster master, final TableName tableName, + final HRegionInfo[] regions, String... family) throws IOException { + validateTableCreation(master, tableName, regions, true, family); + } + + public static void validateTableCreation(final HMaster master, final TableName tableName, + final HRegionInfo[] regions, boolean hasFamilyDirs, String... family) throws IOException { + // check filesystem + final FileSystem fs = master.getMasterFileSystem().getFileSystem(); + final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName); + assertTrue(fs.exists(tableDir)); + FSUtils.logFileSystemState(fs, tableDir, LOG); + List allRegionDirs = FSUtils.getRegionDirs(fs, tableDir); + for (int i = 0; i < regions.length; ++i) { + Path regionDir = new Path(tableDir, regions[i].getEncodedName()); + assertTrue(regions[i] + " region dir does not exist", fs.exists(regionDir)); + assertTrue(allRegionDirs.remove(regionDir)); + List allFamilyDirs = FSUtils.getFamilyDirs(fs, regionDir); + for (int j = 0; j < family.length; ++j) { + final Path familyDir = new Path(regionDir, family[j]); + if (hasFamilyDirs) { + assertTrue(family[j] + " family dir does not exist", fs.exists(familyDir)); + assertTrue(allFamilyDirs.remove(familyDir)); + } else { + // TODO: WARN: Modify Table/Families does not create a family dir + if (!fs.exists(familyDir)) { + LOG.warn(family[j] + " family dir does not exist"); + } + allFamilyDirs.remove(familyDir); + } + } + assertTrue("found extraneous families: " + allFamilyDirs, allFamilyDirs.isEmpty()); + } + assertTrue("found extraneous regions: " + allRegionDirs, allRegionDirs.isEmpty()); + + // check meta + assertTrue(MetaTableAccessor.tableExists(master.getConnection(), tableName)); + assertEquals(regions.length, countMetaRegions(master, tableName)); + + // check htd + TableDescriptor tableDesc = master.getTableDescriptors().getDescriptor(tableName); + assertTrue("table descriptor not found", tableDesc != null); + HTableDescriptor htd = tableDesc.getHTableDescriptor(); + assertTrue("table descriptor not found", htd != null); + for (int i = 0; i < family.length; ++i) { + assertTrue("family not found " + family[i], htd.getFamily(Bytes.toBytes(family[i])) != null); + } + assertEquals(family.length, htd.getFamilies().size()); + } + + public static void validateTableDeletion(final HMaster master, final TableName tableName, + final HRegionInfo[] regions, String... 
family) throws IOException { + // check filesystem + final FileSystem fs = master.getMasterFileSystem().getFileSystem(); + final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName); + assertFalse(fs.exists(tableDir)); + + // check meta + assertFalse(MetaTableAccessor.tableExists(master.getConnection(), tableName)); + assertEquals(0, countMetaRegions(master, tableName)); + + // check htd + assertTrue("found htd of deleted table", + master.getTableDescriptors().getDescriptor(tableName) == null); + } + + private static int countMetaRegions(final HMaster master, final TableName tableName) + throws IOException { + final AtomicInteger actualRegCount = new AtomicInteger(0); + final MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { + @Override + public boolean visit(Result rowResult) throws IOException { + RegionLocations list = MetaTableAccessor.getRegionLocations(rowResult); + if (list == null) { + LOG.warn("No serialized HRegionInfo in " + rowResult); + return true; + } + HRegionLocation l = list.getRegionLocation(); + if (l == null) { + return true; + } + if (!l.getRegionInfo().getTable().equals(tableName)) { + return false; + } + if (l.getRegionInfo().isOffline() || l.getRegionInfo().isSplit()) return true; + HRegionLocation[] locations = list.getRegionLocations(); + for (HRegionLocation location : locations) { + if (location == null) continue; + ServerName serverName = location.getServerName(); + // Make sure that regions are assigned to server + if (serverName != null && serverName.getHostAndPort() != null) { + actualRegCount.incrementAndGet(); + } + } + return true; + } + }; + MetaTableAccessor.scanMetaForTableRegions(master.getConnection(), visitor, tableName); + return actualRegCount.get(); + } + + public static void validateTableIsEnabled(final HMaster master, final TableName tableName) + throws IOException { + TableStateManager tsm = master.getAssignmentManager().getTableStateManager(); + assertTrue(tsm.getTableState(tableName).equals(TableState.State.ENABLED)); + } + + public static void validateTableIsDisabled(final HMaster master, final TableName tableName) + throws IOException { + TableStateManager tsm = master.getAssignmentManager().getTableStateManager(); + assertTrue(tsm.getTableState(tableName).equals(TableState.State.DISABLED)); + } + + public static void testRecoveryAndDoubleExecution( + final ProcedureExecutor procExec, final long procId, + final int numSteps, final TState[] states) throws Exception { + ProcedureTestingUtility.waitProcedure(procExec, procId); + assertEquals(false, procExec.isRunning()); + // Restart the executor and execute the step twice + // execute step N - kill before store update + // restart executor/store + // execute step N - save on store + for (int i = 0; i < numSteps; ++i) { + LOG.info("Restart "+ i +" exec state: " + states[i]); + ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId); + ProcedureTestingUtility.restart(procExec); + ProcedureTestingUtility.waitProcedure(procExec, procId); + } + assertEquals(true, procExec.isRunning()); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + } + + public static void testRollbackAndDoubleExecution( + final ProcedureExecutor procExec, final long procId, + final int lastStep, final TState[] states) throws Exception { + ProcedureTestingUtility.waitProcedure(procExec, procId); + + // Restart the executor and execute the step twice + // execute step N - kill before store update + // restart executor/store + // execute step 
N - save on store + for (int i = 0; i < lastStep; ++i) { + LOG.info("Restart "+ i +" exec state: " + states[i]); + ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId); + ProcedureTestingUtility.restart(procExec); + ProcedureTestingUtility.waitProcedure(procExec, procId); + } + + // Restart the executor and rollback the step twice + // rollback step N - kill before store update + // restart executor/store + // rollback step N - save on store + MasterProcedureTestingUtility.InjectAbortOnLoadListener abortListener = + new MasterProcedureTestingUtility.InjectAbortOnLoadListener(procExec); + procExec.registerListener(abortListener); + try { + for (int i = lastStep + 1; i >= 0; --i) { + LOG.info("Restart " + i +" rollback state: "+ states[i]); + ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId); + ProcedureTestingUtility.restart(procExec); + ProcedureTestingUtility.waitProcedure(procExec, procId); + } + } finally { + assertTrue(procExec.unregisterListener(abortListener)); + } + + ProcedureTestingUtility.assertIsAbortException(procExec.getResult(procId)); + } + + public static void testRollbackAndDoubleExecutionAfterPONR( + final ProcedureExecutor procExec, final long procId, + final int lastStep, final TState[] states) throws Exception { + ProcedureTestingUtility.waitProcedure(procExec, procId); + + // Restart the executor and execute the step twice + // execute step N - kill before store update + // restart executor/store + // execute step N - save on store + for (int i = 0; i < lastStep; ++i) { + LOG.info("Restart "+ i +" exec state: " + states[i]); + ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId); + ProcedureTestingUtility.restart(procExec); + ProcedureTestingUtility.waitProcedure(procExec, procId); + } + + // try to inject the abort + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false); + MasterProcedureTestingUtility.InjectAbortOnLoadListener abortListener = + new MasterProcedureTestingUtility.InjectAbortOnLoadListener(procExec); + procExec.registerListener(abortListener); + try { + ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId); + ProcedureTestingUtility.restart(procExec); + LOG.info("Restart and execute"); + ProcedureTestingUtility.waitProcedure(procExec, procId); + } finally { + assertTrue(procExec.unregisterListener(abortListener)); + } + + assertEquals(true, procExec.isRunning()); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + } + + public static void testRollbackRetriableFailure( + final ProcedureExecutor procExec, final long procId, + final int lastStep, final TState[] states) throws Exception { + ProcedureTestingUtility.waitProcedure(procExec, procId); + + // Restart the executor and execute the step twice + // execute step N - kill before store update + // restart executor/store + // execute step N - save on store + for (int i = 0; i < lastStep; ++i) { + LOG.info("Restart "+ i +" exec state: " + states[i]); + ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId); + ProcedureTestingUtility.restart(procExec); + ProcedureTestingUtility.waitProcedure(procExec, procId); + } + + // execute the rollback + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false); + MasterProcedureTestingUtility.InjectAbortOnLoadListener abortListener = + new MasterProcedureTestingUtility.InjectAbortOnLoadListener(procExec); + procExec.registerListener(abortListener); + try { + ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId); + 
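/*
 * Sketch: setKillAndToggleBeforeStoreUpdate(procExec, true), used throughout
 * these helpers, kills the executor right before a step is persisted and then
 * toggles the flag, so every state runs twice: once killed before the store
 * update, once saved. Condensed shape of the resulting loop (procId and states
 * stand for the helper parameters, proc for any table procedure in this patch):
 */
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
long procId = procExec.submitProcedure(proc);
for (int i = 0; i < states.length; ++i) {
  // the executor died before persisting this step; replay it from the store
  ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
  ProcedureTestingUtility.restart(procExec);
  ProcedureTestingUtility.waitProcedure(procExec, procId);
}
ProcedureTestingUtility.assertProcNotFailed(procExec, procId);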
ProcedureTestingUtility.restart(procExec); + LOG.info("Restart and rollback"); + ProcedureTestingUtility.waitProcedure(procExec, procId); + } finally { + assertTrue(procExec.unregisterListener(abortListener)); + } + + ProcedureTestingUtility.assertIsAbortException(procExec.getResult(procId)); + } + + public static void validateColumnFamilyAddition(final HMaster master, final TableName tableName, + final String family) throws IOException { + TableDescriptor htd = master.getTableDescriptors().getDescriptor(tableName); + assertTrue(htd != null); + + assertTrue(htd.getHTableDescriptor().hasFamily(family.getBytes())); + } + + public static void validateColumnFamilyDeletion(final HMaster master, final TableName tableName, + final String family) throws IOException { + // verify htd + TableDescriptor htd = master.getTableDescriptors().getDescriptor(tableName); + assertTrue(htd != null); + assertFalse(htd.getHTableDescriptor().hasFamily(family.getBytes())); + + // verify fs + final FileSystem fs = master.getMasterFileSystem().getFileSystem(); + final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName); + for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) { + final Path familyDir = new Path(regionDir, family); + assertFalse(family + " family dir should not exist", fs.exists(familyDir)); + } + } + + public static void validateColumnFamilyModification(final HMaster master, + final TableName tableName, final String family, HColumnDescriptor columnDescriptor) + throws IOException { + TableDescriptor htd = master.getTableDescriptors().getDescriptor(tableName); + assertTrue(htd != null); + + HColumnDescriptor hcfd = htd.getHTableDescriptor().getFamily(family.getBytes()); + assertTrue(hcfd.equals(columnDescriptor)); + } + + public static void loadData(final Connection connection, final TableName tableName, + int rows, final byte[][] splitKeys, final String... sfamilies) throws IOException { + byte[][] families = new byte[sfamilies.length][]; + for (int i = 0; i < families.length; ++i) { + families[i] = Bytes.toBytes(sfamilies[i]); + } + + BufferedMutator mutator = connection.getBufferedMutator(tableName); + + // Ensure one row per region + assertTrue(rows >= splitKeys.length); + for (byte[] k: splitKeys) { + byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), k); + byte[] key = Bytes.add(k, Bytes.toBytes(MD5Hash.getMD5AsHex(value))); + mutator.mutate(createPut(families, key, value)); + rows--; + } + + // Add other extra rows. 
more rows, more files + while (rows-- > 0) { + byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), Bytes.toBytes(rows)); + byte[] key = Bytes.toBytes(MD5Hash.getMD5AsHex(value)); + mutator.mutate(createPut(families, key, value)); + } + mutator.flush(); + } + + private static Put createPut(final byte[][] families, final byte[] key, final byte[] value) { + byte[] q = Bytes.toBytes("q"); + Put put = new Put(key); + put.setDurability(Durability.SKIP_WAL); + for (byte[] family: families) { + put.add(family, q, value); + } + return put; + } + + public static class InjectAbortOnLoadListener + implements ProcedureExecutor.ProcedureExecutorListener { + private final ProcedureExecutor procExec; + + public InjectAbortOnLoadListener(final ProcedureExecutor procExec) { + this.procExec = procExec; + } + + @Override + public void procedureLoaded(long procId) { + procExec.abort(procId); + } + + @Override + public void procedureAdded(long procId) { /* no-op */ } + + @Override + public void procedureFinished(long procId) { /* no-op */ } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java new file mode 100644 index 00000000000..1490aa1148d --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java @@ -0,0 +1,246 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import static org.junit.Assert.assertTrue; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureResult; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.AddColumnFamilyState; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({MasterTests.class, MediumTests.class}) +public class TestAddColumnFamilyProcedure { + private static final Log LOG = LogFactory.getLog(TestAddColumnFamilyProcedure.class); + + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private static void setupConf(Configuration conf) { + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + } + + @BeforeClass + public static void setupCluster() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(1); + } + + @AfterClass + public static void cleanupTest() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + @Before + public void setup() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + } + + @After + public void tearDown() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + UTIL.deleteTable(htd.getTableName()); + } + } + + @Test(timeout = 60000) + public void testAddColumnFamily() throws Exception { + final TableName tableName = TableName.valueOf("testAddColumnFamily"); + final String cf1 = "cf1"; + final String cf2 = "cf2"; + final HColumnDescriptor columnDescriptor1 = new HColumnDescriptor(cf1); + final HColumnDescriptor columnDescriptor2 = new HColumnDescriptor(cf2); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f3"); + + // Test 1: Add a column family online + long procId1 = + procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, + columnDescriptor1)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId1); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); + + MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(), + tableName, cf1); + + // Test 2: Add a column family offline + UTIL.getHBaseAdmin().disableTable(tableName); + long procId2 = + procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, + columnDescriptor2)); + // Wait the completion + 
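/*
 * Sketch of what the validateColumnFamilyAddition(...) calls in these tests
 * reduce to, using the accessors shown in MasterProcedureTestingUtility above
 * (TableDescriptor would need an extra import in this file): read the descriptor
 * held by the master and assert the new family is present.
 */
TableDescriptor descriptor = UTIL.getHBaseCluster().getMaster()
    .getTableDescriptors().getDescriptor(tableName);
assertTrue(descriptor != null);
assertTrue(descriptor.getHTableDescriptor().hasFamily(cf2.getBytes()));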
ProcedureTestingUtility.waitProcedure(procExec, procId2); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId2); + MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(), + tableName, cf2); + } + + @Test(timeout=60000) + public void testAddSameColumnFamilyTwice() throws Exception { + final TableName tableName = TableName.valueOf("testAddColumnFamilyTwice"); + final String cf2 = "cf2"; + final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf2); + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1"); + + // add the column family + long procId1 = + procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, + columnDescriptor)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId1); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); + MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(), + tableName, cf2); + + // add the column family that exists + long procId2 = + procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, + columnDescriptor)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId2); + + // Second add should fail with InvalidFamilyOperationException + ProcedureResult result = procExec.getResult(procId2); + assertTrue(result.isFailed()); + LOG.debug("Add failed with exception: " + result.getException()); + assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException); + + // Do the same add the existing column family - this time offline + UTIL.getHBaseAdmin().disableTable(tableName); + long procId3 = + procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, + columnDescriptor)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId3); + + // Second add should fail with InvalidFamilyOperationException + result = procExec.getResult(procId3); + assertTrue(result.isFailed()); + LOG.debug("Add failed with exception: " + result.getException()); + assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException); + } + + @Test(timeout = 60000) + public void testRecoveryAndDoubleExecutionOffline() throws Exception { + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOffline"); + final String cf4 = "cf4"; + final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf4); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + // create the table + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", "f3"); + UTIL.getHBaseAdmin().disableTable(tableName); + + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the AddColumnFamily procedure && kill the executor + long procId = + procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, + columnDescriptor)); + + // Restart the executor and execute the step twice + int numberOfSteps = AddColumnFamilyState.values().length; + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps, + AddColumnFamilyState.values()); + + MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(), + tableName, cf4); + } + + @Test(timeout = 60000) + 
public void testRecoveryAndDoubleExecutionOnline() throws Exception { + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline"); + final String cf5 = "cf5"; + final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf5); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + // create the table + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", "f3"); + + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the AddColumnFamily procedure && kill the executor + long procId = + procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, + columnDescriptor)); + + // Restart the executor and execute the step twice + int numberOfSteps = AddColumnFamilyState.values().length; + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps, + AddColumnFamilyState.values()); + + MasterProcedureTestingUtility.validateColumnFamilyAddition(UTIL.getHBaseCluster().getMaster(), + tableName, cf5); + } + + @Test(timeout = 60000) + public void testRollbackAndDoubleExecution() throws Exception { + final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution"); + final String cf6 = "cf6"; + final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf6); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2"); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the AddColumnFamily procedure && kill the executor + long procId = + procExec.submitProcedure(new AddColumnFamilyProcedure(procExec.getEnvironment(), tableName, + columnDescriptor)); + + int numberOfSteps = AddColumnFamilyState.values().length - 2; // failing in the middle of proc + MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps, + AddColumnFamilyState.values()); + + MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), + tableName, cf6); + } + + private ProcedureExecutor getMasterProcedureExecutor() { + return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java new file mode 100644 index 00000000000..7cd64b671a5 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java @@ -0,0 +1,257 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableExistsException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.ModifyRegionUtils; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, MediumTests.class}) +public class TestCreateTableProcedure { + private static final Log LOG = LogFactory.getLog(TestCreateTableProcedure.class); + + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private static void setupConf(Configuration conf) { + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + } + + @BeforeClass + public static void setupCluster() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(1); + } + + @AfterClass + public static void cleanupTest() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + @Before + public void setup() throws Exception { + resetProcExecutorTestingKillFlag(); + } + + @After + public void tearDown() throws Exception { + resetProcExecutorTestingKillFlag(); + for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + UTIL.deleteTable(htd.getTableName()); + } + } + + private void resetProcExecutorTestingKillFlag() { + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false); + assertTrue("expected executor to be running", procExec.isRunning()); + } + + @Test(timeout=60000) + public void testSimpleCreate() throws Exception { + final TableName tableName = TableName.valueOf("testSimpleCreate"); + final byte[][] splitKeys = null; + testSimpleCreate(tableName, splitKeys); + } + + @Test(timeout=60000) + public void testSimpleCreateWithSplits() throws Exception { + final TableName tableName = TableName.valueOf("testSimpleCreateWithSplits"); + final byte[][] splitKeys = new byte[][] { + Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") + }; + testSimpleCreate(tableName, splitKeys); + } + + private void testSimpleCreate(final TableName tableName, byte[][] splitKeys) throws Exception { + HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2"); + 
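/*
 * Sketch: ModifyRegionUtils.createHRegionInfos turns N split keys into N + 1
 * regions, with an open-ended first and last region. An illustrative,
 * self-contained check built only from calls that already appear in this patch
 * (the "sketchSplits" table name and variables are hypothetical):
 */
HTableDescriptor sketchHtd =
    MasterProcedureTestingUtility.createHTD(TableName.valueOf("sketchSplits"), "f1");
byte[][] sketchSplits =
    new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") };
HRegionInfo[] sketchRegions = ModifyRegionUtils.createHRegionInfos(sketchHtd, sketchSplits);
assertEquals(sketchSplits.length + 1, sketchRegions.length);
assertEquals(0, sketchRegions[0].getStartKey().length);                      // open start key
assertEquals(0, sketchRegions[sketchRegions.length - 1].getEndKey().length); // open end key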
MasterProcedureTestingUtility.validateTableCreation( + UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2"); + } + + @Test(timeout=60000, expected=TableExistsException.class) + public void testCreateExisting() throws Exception { + final TableName tableName = TableName.valueOf("testCreateExisting"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f"); + final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null); + + // create the table + long procId1 = procExec.submitProcedure( + new CreateTableProcedure(procExec.getEnvironment(), htd, regions)); + + // create another with the same name + ProcedurePrepareLatch latch2 = new ProcedurePrepareLatch.CompatibilityLatch(); + long procId2 = procExec.submitProcedure( + new CreateTableProcedure(procExec.getEnvironment(), htd, regions, latch2)); + + ProcedureTestingUtility.waitProcedure(procExec, procId1); + ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId1)); + + ProcedureTestingUtility.waitProcedure(procExec, procId2); + latch2.await(); + } + + @Test(timeout=60000) + public void testRecoveryAndDoubleExecution() throws Exception { + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution"); + + // create the table + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Create procedure && kill the executor + byte[][] splitKeys = null; + HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2"); + HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys); + long procId = procExec.submitProcedure( + new CreateTableProcedure(procExec.getEnvironment(), htd, regions)); + + // Restart the executor and execute the step twice + // NOTE: the 6 (number of CreateTableState steps) is hardcoded, + // so you have to look at this test at least once when you add a new step. + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution( + procExec, procId, 6, CreateTableState.values()); + + MasterProcedureTestingUtility.validateTableCreation( + UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2"); + } + + @Test(timeout=90000) + public void testRollbackAndDoubleExecution() throws Exception { + final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution"); + + // create the table + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Create procedure && kill the executor + final byte[][] splitKeys = new byte[][] { + Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") + }; + HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2"); + htd.setRegionReplication(3); + HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys); + long procId = procExec.submitProcedure( + new CreateTableProcedure(procExec.getEnvironment(), htd, regions)); + + // NOTE: the 4 (number of CreateTableState steps) is hardcoded, + // so you have to look at this test at least once when you add a new step. 
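/*
 * Sketch: the hardcoded counts ("6" for full execution above, "4" here) index
 * into the CreateTableState enum, whose values() order is the execution order
 * used by these helpers. Logging the mapping makes the numbers easy to re-check
 * whenever a state is added (illustrative only):
 */
for (CreateTableState state : CreateTableState.values()) {
  LOG.info("CreateTableState step " + state.ordinal() + " -> " + state);
}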
+ MasterProcedureTestingUtility.testRollbackAndDoubleExecution( + procExec, procId, 4, CreateTableState.values()); + + MasterProcedureTestingUtility.validateTableDeletion( + UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2"); + + // are we able to create the table after a rollback? + resetProcExecutorTestingKillFlag(); + testSimpleCreate(tableName, splitKeys); + } + + @Test(timeout=90000) + public void testRollbackRetriableFailure() throws Exception { + final TableName tableName = TableName.valueOf("testRollbackRetriableFailure"); + + // create the table + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Create procedure && kill the executor + final byte[][] splitKeys = new byte[][] { + Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") + }; + HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2"); + HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys); + long procId = procExec.submitProcedure( + new FaultyCreateTableProcedure(procExec.getEnvironment(), htd, regions)); + + // NOTE: the 4 (number of CreateTableState steps) is hardcoded, + // so you have to look at this test at least once when you add a new step. + MasterProcedureTestingUtility.testRollbackRetriableFailure( + procExec, procId, 4, CreateTableState.values()); + + MasterProcedureTestingUtility.validateTableDeletion( + UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2"); + + // are we able to create the table after a rollback? + resetProcExecutorTestingKillFlag(); + testSimpleCreate(tableName, splitKeys); + } + + private ProcedureExecutor getMasterProcedureExecutor() { + return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + } + + public static class FaultyCreateTableProcedure extends CreateTableProcedure { + private int retries = 0; + + public FaultyCreateTableProcedure() { + // Required by the Procedure framework to create the procedure on replay + } + + public FaultyCreateTableProcedure(final MasterProcedureEnv env, + final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions) + throws IOException { + super(env, hTableDescriptor, newRegions); + } + + @Override + protected void rollbackState(final MasterProcedureEnv env, final CreateTableState state) + throws IOException { + if (retries++ < 3) { + LOG.info("inject rollback failure state=" + state); + throw new IOException("injected failure number " + retries); + } else { + super.rollbackState(env, state); + retries = 0; + } + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java new file mode 100644 index 00000000000..dcf194085cf --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java @@ -0,0 +1,302 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import static org.junit.Assert.assertTrue; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureResult; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteColumnFamilyState; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({MasterTests.class, MediumTests.class}) +public class TestDeleteColumnFamilyProcedure { + private static final Log LOG = LogFactory.getLog(TestDeleteColumnFamilyProcedure.class); + + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private static void setupConf(Configuration conf) { + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + } + + @BeforeClass + public static void setupCluster() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(1); + } + + @AfterClass + public static void cleanupTest() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + @Before + public void setup() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + } + + @After + public void tearDown() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + UTIL.deleteTable(htd.getTableName()); + } + } + + @Test(timeout = 60000) + public void testDeleteColumnFamily() throws Exception { + final TableName tableName = TableName.valueOf("testDeleteColumnFamily"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + final String cf1 = "cf1"; + final String cf2 = "cf2"; + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, cf1, cf2, "f3"); + + // Test 1: delete the column family that exists online + long procId1 = + procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(), + tableName, cf1.getBytes())); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId1); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); + + MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), + 
tableName, cf1); + + // Test 2: delete the column family that exists offline + UTIL.getHBaseAdmin().disableTable(tableName); + long procId2 = + procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(), + tableName, cf2.getBytes())); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId2); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId2); + } + + @Test(timeout=60000) + public void testDeleteColumnFamilyTwice() throws Exception { + final TableName tableName = TableName.valueOf("testDeleteColumnFamilyTwice"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + final String cf2 = "cf2"; + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", cf2); + + // delete the column family that exists + long procId1 = + procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(), + tableName, cf2.getBytes())); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId1); + // First delete should succeed + ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); + + MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), + tableName, cf2); + + // delete the column family that does not exist + long procId2 = + procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(), + tableName, cf2.getBytes())); + + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId2); + + // Second delete should fail with InvalidFamilyOperationException + ProcedureResult result = procExec.getResult(procId2); + assertTrue(result.isFailed()); + LOG.debug("Delete online failed with exception: " + result.getException()); + assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException); + + // Try again, this time with table disabled. 
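/*
 * Sketch: these failure checks read the ProcedureResult instead of catching an
 * exception: the procedure completes, but its result is marked failed and
 * carries the root cause. For the disabled-table retry below, the same check
 * applies to the id returned by that retry (procId3):
 */
ProcedureResult failedResult = procExec.getResult(procId3);
assertTrue(failedResult.isFailed());
assertTrue(failedResult.getException().getCause() instanceof InvalidFamilyOperationException);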
+ UTIL.getHBaseAdmin().disableTable(tableName); + long procId3 = + procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(), + tableName, cf2.getBytes())); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId3); + // Expect fail with InvalidFamilyOperationException + result = procExec.getResult(procId2); + assertTrue(result.isFailed()); + LOG.debug("Delete offline failed with exception: " + result.getException()); + assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException); + } + + @Test(timeout=60000) + public void testDeleteNonExistingColumnFamily() throws Exception { + final TableName tableName = TableName.valueOf("testDeleteNonExistingColumnFamily"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + final String cf3 = "cf3"; + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2"); + + // delete the column family that does not exist + long procId1 = + procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(), + tableName, cf3.getBytes())); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId1); + + ProcedureResult result = procExec.getResult(procId1); + assertTrue(result.isFailed()); + LOG.debug("Delete failed with exception: " + result.getException()); + assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException); + } + + @Test(timeout=60000) + public void testRecoveryAndDoubleExecutionOffline() throws Exception { + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOffline"); + final String cf4 = "cf4"; + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", "f3", cf4); + UTIL.getHBaseAdmin().disableTable(tableName); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Delete procedure && kill the executor + long procId = + procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(), + tableName, cf4.getBytes())); + + // Restart the executor and execute the step twice + int numberOfSteps = DeleteColumnFamilyState.values().length; + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps, + DeleteColumnFamilyState.values()); + + MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), + tableName, cf4); + } + + @Test(timeout = 60000) + public void testRecoveryAndDoubleExecutionOnline() throws Exception { + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline"); + final String cf5 = "cf5"; + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", "f3", cf5); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Delete procedure && kill the executor + long procId = + procExec.submitProcedure(new DeleteColumnFamilyProcedure(procExec.getEnvironment(), + tableName, cf5.getBytes())); + + // Restart the executor and execute the step twice + int numberOfSteps = DeleteColumnFamilyState.values().length; + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps, + 
DeleteColumnFamilyState.values()); + + MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), + tableName, cf5); + } + + @Test(timeout = 60000) + public void testRollbackAndDoubleExecution() throws Exception { + final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution"); + final String cf5 = "cf5"; + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, "f1", "f2", "f3", cf5); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Delete procedure && kill the executor + long procId = procExec.submitProcedure( + new DeleteColumnFamilyProcedure(procExec.getEnvironment(), tableName, cf5.getBytes())); + + // Failing before DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT we should trigger the rollback + // NOTE: the 1 (number before DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT step) is hardcoded, + // so you have to look at this test at least once when you add a new step. + int numberOfSteps = 1; + MasterProcedureTestingUtility.testRollbackAndDoubleExecution( + procExec, + procId, + numberOfSteps, + DeleteColumnFamilyState.values()); + + MasterProcedureTestingUtility.validateTableCreation( + UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2", "f3", cf5); + } + + @Test(timeout = 60000) + public void testRollbackAndDoubleExecutionAfterPONR() throws Exception { + final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionAfterPONR"); + final String cf5 = "cf5"; + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, "f1", "f2", "f3", cf5); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Delete procedure && kill the executor + long procId = procExec.submitProcedure( + new DeleteColumnFamilyProcedure(procExec.getEnvironment(), tableName, cf5.getBytes())); + + // Failing after DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT we should not trigger the rollback. + // NOTE: the 4 (number of DELETE_COLUMN_FAMILY_DELETE_FS_LAYOUT + 1 step) is hardcoded, + // so you have to look at this test at least once when you add a new step. + int numberOfSteps = 4; + MasterProcedureTestingUtility.testRollbackAndDoubleExecutionAfterPONR( + procExec, + procId, + numberOfSteps, + DeleteColumnFamilyState.values()); + + MasterProcedureTestingUtility.validateColumnFamilyDeletion( + UTIL.getHBaseCluster().getMaster(), tableName, cf5); + } + + private ProcedureExecutor getMasterProcedureExecutor() { + return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java new file mode 100644 index 00000000000..6795b222b06 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteTableProcedure.java @@ -0,0 +1,208 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotDisabledException; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureResult; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, MediumTests.class}) +public class TestDeleteTableProcedure { + private static final Log LOG = LogFactory.getLog(TestDeleteTableProcedure.class); + + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private static void setupConf(Configuration conf) { + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + } + + @BeforeClass + public static void setupCluster() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(1); + } + + @AfterClass + public static void cleanupTest() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + @Before + public void setup() throws Exception { + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false); + assertTrue("expected executor to be running", procExec.isRunning()); + } + + @After + public void tearDown() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + UTIL.deleteTable(htd.getTableName()); + } + } + + @Test(timeout=60000, expected=TableNotFoundException.class) + public void testDeleteNotExistentTable() throws Exception { + final TableName tableName = TableName.valueOf("testDeleteNotExistentTable"); + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + ProcedurePrepareLatch latch = new 
ProcedurePrepareLatch.CompatibilityLatch(); + long procId = ProcedureTestingUtility.submitAndWait(procExec, + new DeleteTableProcedure(procExec.getEnvironment(), tableName, latch)); + latch.await(); + } + + @Test(timeout=60000, expected=TableNotDisabledException.class) + public void testDeleteNotDisabledTable() throws Exception { + final TableName tableName = TableName.valueOf("testDeleteNotDisabledTable"); + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f"); + + ProcedurePrepareLatch latch = new ProcedurePrepareLatch.CompatibilityLatch(); + long procId = ProcedureTestingUtility.submitAndWait(procExec, + new DeleteTableProcedure(procExec.getEnvironment(), tableName, latch)); + latch.await(); + } + + @Test(timeout=60000) + public void testDeleteDeletedTable() throws Exception { + final TableName tableName = TableName.valueOf("testDeleteDeletedTable"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, "f"); + UTIL.getHBaseAdmin().disableTable(tableName); + + // delete the table (that exists) + long procId1 = procExec.submitProcedure( + new DeleteTableProcedure(procExec.getEnvironment(), tableName)); + // delete the table (that will no longer exist) + long procId2 = procExec.submitProcedure( + new DeleteTableProcedure(procExec.getEnvironment(), tableName)); + + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId1); + ProcedureTestingUtility.waitProcedure(procExec, procId2); + + // First delete should succeed + ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); + MasterProcedureTestingUtility.validateTableDeletion( + UTIL.getHBaseCluster().getMaster(), tableName, regions, "f"); + + // Second delete should fail with TableNotFound + ProcedureResult result = procExec.getResult(procId2); + assertTrue(result.isFailed()); + LOG.debug("Delete failed with exception: " + result.getException()); + assertTrue(result.getException().getCause() instanceof TableNotFoundException); + } + + @Test(timeout=60000) + public void testSimpleDelete() throws Exception { + final TableName tableName = TableName.valueOf("testSimpleDelete"); + final byte[][] splitKeys = null; + testSimpleDelete(tableName, splitKeys); + } + + @Test(timeout=60000) + public void testSimpleDeleteWithSplits() throws Exception { + final TableName tableName = TableName.valueOf("testSimpleDeleteWithSplits"); + final byte[][] splitKeys = new byte[][] { + Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") + }; + testSimpleDelete(tableName, splitKeys); + } + + private void testSimpleDelete(final TableName tableName, byte[][] splitKeys) throws Exception { + HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2"); + UTIL.getHBaseAdmin().disableTable(tableName); + + // delete the table + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + long procId = ProcedureTestingUtility.submitAndWait(procExec, + new DeleteTableProcedure(procExec.getEnvironment(), tableName)); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + MasterProcedureTestingUtility.validateTableDeletion( + UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2"); + } + + @Test(timeout=60000) + public void testRecoveryAndDoubleExecution() throws Exception { + final TableName tableName = 
TableName.valueOf("testRecoveryAndDoubleExecution"); + + // create the table + byte[][] splitKeys = null; + HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2"); + UTIL.getHBaseAdmin().disableTable(tableName); + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Delete procedure && kill the executor + long procId = procExec.submitProcedure( + new DeleteTableProcedure(procExec.getEnvironment(), tableName)); + + // Restart the executor and execute the step twice + // NOTE: the 6 (number of DeleteTableState steps) is hardcoded, + // so you have to look at this test at least once when you add a new step. + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution( + procExec, procId, 6, DeleteTableState.values()); + + MasterProcedureTestingUtility.validateTableDeletion( + UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2"); + } + + private ProcedureExecutor getMasterProcedureExecutor() { + return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java new file mode 100644 index 00000000000..0537ccc3473 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDisableTableProcedure.java @@ -0,0 +1,182 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import static org.junit.Assert.assertTrue; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotEnabledException; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureResult; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({MasterTests.class, MediumTests.class}) +public class TestDisableTableProcedure { + private static final Log LOG = LogFactory.getLog(TestDisableTableProcedure.class); + + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private static void setupConf(Configuration conf) { + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + } + + @BeforeClass + public static void setupCluster() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(1); + } + + @AfterClass + public static void cleanupTest() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + @Before + public void setup() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + } + + @After + public void tearDown() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + UTIL.deleteTable(htd.getTableName()); + } + } + + @Test(timeout = 60000) + public void testDisableTable() throws Exception { + final TableName tableName = TableName.valueOf("testDisableTable"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2"); + + // Disable the table + long procId = procExec.submitProcedure( + new DisableTableProcedure(procExec.getEnvironment(), tableName, false)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(), + tableName); + } + + @Test(timeout = 60000) + public void testDisableTableMultipleTimes() throws Exception { + final TableName tableName = TableName.valueOf("testDisableTableMultipleTimes"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2"); + + // Disable the table + long procId1 = procExec.submitProcedure(new DisableTableProcedure( + procExec.getEnvironment(), tableName, false)); + // Wait the completion + 
ProcedureTestingUtility.waitProcedure(procExec, procId1); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); + MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(), + tableName); + + // Disable the table again - expect failure + long procId2 = procExec.submitProcedure(new DisableTableProcedure( + procExec.getEnvironment(), tableName, false)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId2); + ProcedureResult result = procExec.getResult(procId2); + assertTrue(result.isFailed()); + LOG.debug("Disable failed with exception: " + result.getException()); + assertTrue(result.getException().getCause() instanceof TableNotEnabledException); + + // Disable the table - expect failure from ProcedurePrepareLatch + try { + final ProcedurePrepareLatch prepareLatch = new ProcedurePrepareLatch.CompatibilityLatch(); + + long procId3 = procExec.submitProcedure(new DisableTableProcedure( + procExec.getEnvironment(), tableName, false, prepareLatch)); + prepareLatch.await(); + Assert.fail("Disable should throw exception through latch."); + } catch (TableNotEnabledException tnee) { + // Expected + LOG.debug("Disable failed with expected exception."); + } + + // Disable the table again with skipping table state check flag (simulate recovery scenario) + long procId4 = procExec.submitProcedure(new DisableTableProcedure( + procExec.getEnvironment(), tableName, true)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId4); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId4); + MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(), + tableName); + } + + @Test(timeout=60000) + public void testRecoveryAndDoubleExecution() throws Exception { + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + final byte[][] splitKeys = new byte[][] { + Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") + }; + MasterProcedureTestingUtility.createTable(procExec, tableName, splitKeys, "f1", "f2"); + + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Disable procedure && kill the executor + long procId = + procExec.submitProcedure(new DisableTableProcedure(procExec.getEnvironment(), tableName, + false)); + + // Restart the executor and execute the step twice + int numberOfSteps = DisableTableState.values().length; + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution( + procExec, + procId, + numberOfSteps, + DisableTableState.values()); + MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(), + tableName); + } + + private ProcedureExecutor getMasterProcedureExecutor() { + return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java new file mode 100644 index 00000000000..12c78e865af --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestEnableTableProcedure.java @@ -0,0 +1,193 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import static org.junit.Assert.assertTrue; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotDisabledException; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureResult; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({MasterTests.class, MediumTests.class}) +public class TestEnableTableProcedure { + private static final Log LOG = LogFactory.getLog(TestEnableTableProcedure.class); + + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private static void setupConf(Configuration conf) { + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + } + + @BeforeClass + public static void setupCluster() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(1); + } + + @AfterClass + public static void cleanupTest() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + @Before + public void setup() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + } + + @After + public void tearDown() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + UTIL.deleteTable(htd.getTableName()); + } + } + + @Test(timeout = 60000) + public void testEnableTable() throws Exception { + final TableName tableName = TableName.valueOf("testEnableTable"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2"); + UTIL.getHBaseAdmin().disableTable(tableName); + + // Enable the table + long procId = procExec.submitProcedure( + new EnableTableProcedure(procExec.getEnvironment(), tableName, false)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + 
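The trailing boolean on these EnableTableProcedure/DisableTableProcedure constructors is the "skip table state check" flag that later tests flip to true to simulate a recovery run. A minimal sketch of the two call shapes, reusing the procExec and tableName from the surrounding test (illustrative only, not part of the change):

    // Client-style enable: the prepare step fails if the table is not currently disabled.
    long normalProcId = procExec.submitProcedure(
        new EnableTableProcedure(procExec.getEnvironment(), tableName, false));

    // Recovery-style enable: bypass the table-state precondition check.
    long recoveryProcId = procExec.submitProcedure(
        new EnableTableProcedure(procExec.getEnvironment(), tableName, true));
    ProcedureTestingUtility.waitProcedure(procExec, recoveryProcId);
    ProcedureTestingUtility.assertProcNotFailed(procExec, recoveryProcId);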
MasterProcedureTestingUtility.validateTableIsEnabled(UTIL.getHBaseCluster().getMaster(), + tableName); + } + + @Test(timeout=60000, expected=TableNotDisabledException.class) + public void testEnableNonDisabledTable() throws Exception { + final TableName tableName = TableName.valueOf("testEnableNonExistingTable"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2"); + + // Enable the table - expect failure + long procId1 = procExec.submitProcedure( + new EnableTableProcedure(procExec.getEnvironment(), tableName, false)); + ProcedureTestingUtility.waitProcedure(procExec, procId1); + + ProcedureResult result = procExec.getResult(procId1); + assertTrue(result.isFailed()); + LOG.debug("Enable failed with exception: " + result.getException()); + assertTrue(result.getException().getCause() instanceof TableNotDisabledException); + + // Enable the table with skipping table state check flag (simulate recovery scenario) + long procId2 = procExec.submitProcedure( + new EnableTableProcedure(procExec.getEnvironment(), tableName, true)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId2); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId2); + + // Enable the table - expect failure from ProcedurePrepareLatch + final ProcedurePrepareLatch prepareLatch = new ProcedurePrepareLatch.CompatibilityLatch(); + long procId3 = procExec.submitProcedure( + new EnableTableProcedure(procExec.getEnvironment(), tableName, false, prepareLatch)); + prepareLatch.await(); + Assert.fail("Enable should throw exception through latch."); + } + + @Test(timeout = 60000) + public void testRecoveryAndDoubleExecution() throws Exception { + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + final byte[][] splitKeys = new byte[][] { + Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") + }; + MasterProcedureTestingUtility.createTable(procExec, tableName, splitKeys, "f1", "f2"); + UTIL.getHBaseAdmin().disableTable(tableName); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Enable procedure && kill the executor + long procId = procExec.submitProcedure( + new EnableTableProcedure(procExec.getEnvironment(), tableName, false)); + + // Restart the executor and execute the step twice + int numberOfSteps = EnableTableState.values().length; + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution( + procExec, + procId, + numberOfSteps, + EnableTableState.values()); + MasterProcedureTestingUtility.validateTableIsEnabled(UTIL.getHBaseCluster().getMaster(), + tableName); + } + + @Test(timeout = 60000) + public void testRollbackAndDoubleExecution() throws Exception { + final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + final byte[][] splitKeys = new byte[][] { + Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") + }; + MasterProcedureTestingUtility.createTable(procExec, tableName, splitKeys, "f1", "f2"); + UTIL.getHBaseAdmin().disableTable(tableName); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Enable procedure && kill the executor + long procId = procExec.submitProcedure( + 
new EnableTableProcedure(procExec.getEnvironment(), tableName, false)); + + int numberOfSteps = EnableTableState.values().length - 2; // failing in the middle of proc + MasterProcedureTestingUtility.testRollbackAndDoubleExecution( + procExec, + procId, + numberOfSteps, + EnableTableState.values()); + MasterProcedureTestingUtility.validateTableIsDisabled(UTIL.getHBaseCluster().getMaster(), + tableName); + } + + private ProcedureExecutor getMasterProcedureExecutor() { + return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java new file mode 100644 index 00000000000..25763023500 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java @@ -0,0 +1,429 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.util.concurrent.CountDownLatch; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; +import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableState; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.ModifyRegionUtils; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, LargeTests.class}) +public class TestMasterFailoverWithProcedures { + private static final Log LOG = LogFactory.getLog(TestMasterFailoverWithProcedures.class); + + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private static void setupConf(Configuration conf) { + } + + @Before + public void setup() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(2, 1); + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, false); + ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, false); + } + + @After + public void tearDown() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + @Test(timeout=60000) + public void testWalRecoverLease() throws Exception { + final ProcedureStore masterStore = getMasterProcedureExecutor().getStore(); + assertTrue("expected WALStore for this test", masterStore instanceof WALProcedureStore); + + HMaster firstMaster = UTIL.getHBaseCluster().getMaster(); + // Abort Latch for the master store + final CountDownLatch masterStoreAbort = new CountDownLatch(1); + masterStore.registerListener(new ProcedureStore.ProcedureStoreListener() { + @Override + public void abortProcess() { + LOG.debug("Abort store of Master"); + masterStoreAbort.countDown(); + } + }); + + // startup a fake master the new WAL store will take the lease + // and the active master should abort. 
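The takeover that the next statements construct boils down to pointing a second WALProcedureStore at the active master's procedure log directory and recovering the lease; the abort listener registered above should then fire. A condensed sketch, where conf, fs, logDir and backupMaster stand in for the values the test builds below (illustrative only):

    // Rival store aimed at the same procedure WAL directory as masterStore.
    WALProcedureStore rival = new WALProcedureStore(conf, fs, logDir,
        new MasterProcedureEnv.WALStoreLeaseRecovery(backupMaster));
    rival.start(1);
    rival.recoverLease();        // takes over the WAL lease held by masterStore
    // A later write through the old store (the CreateTableProcedure submitted below)
    // makes it notice the lost lease; the listener above then counts down.
    masterStoreAbort.await();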
+ HMaster backupMaster3 = Mockito.mock(HMaster.class); + Mockito.doReturn(firstMaster.getConfiguration()).when(backupMaster3).getConfiguration(); + Mockito.doReturn(true).when(backupMaster3).isActiveMaster(); + final WALProcedureStore backupStore3 = new WALProcedureStore(firstMaster.getConfiguration(), + firstMaster.getMasterFileSystem().getFileSystem(), + ((WALProcedureStore)masterStore).getLogDir(), + new MasterProcedureEnv.WALStoreLeaseRecovery(backupMaster3)); + // Abort Latch for the test store + final CountDownLatch backupStore3Abort = new CountDownLatch(1); + backupStore3.registerListener(new ProcedureStore.ProcedureStoreListener() { + @Override + public void abortProcess() { + LOG.debug("Abort store of backupMaster3"); + backupStore3Abort.countDown(); + backupStore3.stop(true); + } + }); + backupStore3.start(1); + backupStore3.recoverLease(); + + // Try to trigger a command on the master (WAL lease expired on the active one) + HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(TableName.valueOf("mtb"), "f"); + HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null); + LOG.debug("submit proc"); + getMasterProcedureExecutor().submitProcedure( + new CreateTableProcedure(getMasterProcedureExecutor().getEnvironment(), htd, regions)); + LOG.debug("wait master store abort"); + masterStoreAbort.await(); + + // Now the real backup master should start up + LOG.debug("wait backup master to startup"); + waitBackupMaster(UTIL, firstMaster); + assertEquals(true, firstMaster.isStopped()); + + // wait the store in here to abort (the test will fail due to timeout if it doesn't) + LOG.debug("wait the store to abort"); + backupStore3.getStoreTracker().setDeleted(1, false); + backupStore3.delete(1); + backupStore3Abort.await(); + } + + // ========================================================================== + // Test Create Table + // ========================================================================== + @Test(timeout=60000) + public void testCreateWithFailover() throws Exception { + // TODO: Should we try every step? (master failover takes long time) + // It is already covered by TestCreateTableProcedure + // but without the master restart, only the executor/store is restarted. + // Without Master restart we may not find bug in the procedure code + // like missing "wait" for resources to be available (e.g. 
RS) + testCreateWithFailoverAtStep(CreateTableState.CREATE_TABLE_ASSIGN_REGIONS.ordinal()); + } + + private void testCreateWithFailoverAtStep(final int step) throws Exception { + final TableName tableName = TableName.valueOf("testCreateWithFailoverAtStep" + step); + + // create the table + ProcedureExecutor procExec = getMasterProcedureExecutor(); + ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, true); + ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, true); + + // Start the Create procedure && kill the executor + byte[][] splitKeys = null; + HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2"); + HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys); + long procId = procExec.submitProcedure( + new CreateTableProcedure(procExec.getEnvironment(), htd, regions)); + testRecoveryAndDoubleExecution(UTIL, procId, step, CreateTableState.values()); + + MasterProcedureTestingUtility.validateTableCreation( + UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2"); + } + + // ========================================================================== + // Test Delete Table + // ========================================================================== + @Test(timeout=60000) + public void testDeleteWithFailover() throws Exception { + // TODO: Should we try every step? (master failover takes long time) + // It is already covered by TestDeleteTableProcedure + // but without the master restart, only the executor/store is restarted. + // Without Master restart we may not find bug in the procedure code + // like missing "wait" for resources to be available (e.g. RS) + testDeleteWithFailoverAtStep(DeleteTableState.DELETE_TABLE_UNASSIGN_REGIONS.ordinal()); + } + + private void testDeleteWithFailoverAtStep(final int step) throws Exception { + final TableName tableName = TableName.valueOf("testDeleteWithFailoverAtStep" + step); + + // create the table + byte[][] splitKeys = null; + HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2"); + Path tableDir = FSUtils.getTableDir(getRootDir(), tableName); + MasterProcedureTestingUtility.validateTableCreation( + UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2"); + UTIL.getHBaseAdmin().disableTable(tableName); + + ProcedureExecutor procExec = getMasterProcedureExecutor(); + ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, true); + ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, true); + + // Start the Delete procedure && kill the executor + long procId = procExec.submitProcedure( + new DeleteTableProcedure(procExec.getEnvironment(), tableName)); + testRecoveryAndDoubleExecution(UTIL, procId, step, DeleteTableState.values()); + + MasterProcedureTestingUtility.validateTableDeletion( + UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2"); + } + + // ========================================================================== + // Test Truncate Table + // ========================================================================== + @Test(timeout=90000) + public void testTruncateWithFailover() throws Exception { + // TODO: Should we try every step? (master failover takes long time) + // It is already covered by TestTruncateTableProcedure + // but without the master restart, only the executor/store is restarted. + // Without Master restart we may not find bug in the procedure code + // like missing "wait" for resources to be available (e.g. 
RS) + testTruncateWithFailoverAtStep(true, TruncateTableState.TRUNCATE_TABLE_ADD_TO_META.ordinal()); + } + + private void testTruncateWithFailoverAtStep(final boolean preserveSplits, final int step) + throws Exception { + final TableName tableName = TableName.valueOf("testTruncateWithFailoverAtStep" + step); + + // create the table + final String[] families = new String[] { "f1", "f2" }; + final byte[][] splitKeys = new byte[][] { + Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") + }; + HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + getMasterProcedureExecutor(), tableName, splitKeys, families); + // load and verify that there are rows in the table + MasterProcedureTestingUtility.loadData( + UTIL.getConnection(), tableName, 100, splitKeys, families); + assertEquals(100, UTIL.countRows(tableName)); + // disable the table + UTIL.getHBaseAdmin().disableTable(tableName); + + ProcedureExecutor procExec = getMasterProcedureExecutor(); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Truncate procedure && kill the executor + long procId = procExec.submitProcedure( + new TruncateTableProcedure(procExec.getEnvironment(), tableName, preserveSplits)); + testRecoveryAndDoubleExecution(UTIL, procId, step, TruncateTableState.values()); + + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false); + UTIL.waitUntilAllRegionsAssigned(tableName); + + // validate the table regions and layout + if (preserveSplits) { + assertEquals(1 + splitKeys.length, UTIL.getHBaseAdmin().getTableRegions(tableName).size()); + } else { + regions = UTIL.getHBaseAdmin().getTableRegions(tableName).toArray(new HRegionInfo[1]); + assertEquals(1, regions.length); + } + MasterProcedureTestingUtility.validateTableCreation( + UTIL.getHBaseCluster().getMaster(), tableName, regions, families); + + // verify that there are no rows in the table + assertEquals(0, UTIL.countRows(tableName)); + + // verify that the table is read/writable + MasterProcedureTestingUtility.loadData( + UTIL.getConnection(), tableName, 50, splitKeys, families); + assertEquals(50, UTIL.countRows(tableName)); + } + + // ========================================================================== + // Test Disable Table + // ========================================================================== + @Test(timeout=60000) + public void testDisableTableWithFailover() throws Exception { + // TODO: Should we try every step? (master failover takes long time) + // It is already covered by TestDisableTableProcedure + // but without the master restart, only the executor/store is restarted. + // Without Master restart we may not find bug in the procedure code + // like missing "wait" for resources to be available (e.g. 
RS) + testDisableTableWithFailoverAtStep( + DisableTableState.DISABLE_TABLE_MARK_REGIONS_OFFLINE.ordinal()); + } + + private void testDisableTableWithFailoverAtStep(final int step) throws Exception { + final TableName tableName = TableName.valueOf("testDisableTableWithFailoverAtStep" + step); + + // create the table + final byte[][] splitKeys = new byte[][] { + Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") + }; + MasterProcedureTestingUtility.createTable( + getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2"); + + ProcedureExecutor procExec = getMasterProcedureExecutor(); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Delete procedure && kill the executor + long procId = procExec.submitProcedure( + new DisableTableProcedure(procExec.getEnvironment(), tableName, false)); + testRecoveryAndDoubleExecution(UTIL, procId, step, DisableTableState.values()); + + MasterProcedureTestingUtility.validateTableIsDisabled( + UTIL.getHBaseCluster().getMaster(), tableName); + } + + // ========================================================================== + // Test Enable Table + // ========================================================================== + @Test(timeout=60000) + public void testEnableTableWithFailover() throws Exception { + // TODO: Should we try every step? (master failover takes long time) + // It is already covered by TestEnableTableProcedure + // but without the master restart, only the executor/store is restarted. + // Without Master restart we may not find bug in the procedure code + // like missing "wait" for resources to be available (e.g. RS) + testEnableTableWithFailoverAtStep( + EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE.ordinal()); + } + + private void testEnableTableWithFailoverAtStep(final int step) throws Exception { + final TableName tableName = TableName.valueOf("testEnableTableWithFailoverAtStep" + step); + + // create the table + final byte[][] splitKeys = new byte[][] { + Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") + }; + MasterProcedureTestingUtility.createTable( + getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2"); + UTIL.getHBaseAdmin().disableTable(tableName); + + ProcedureExecutor procExec = getMasterProcedureExecutor(); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Delete procedure && kill the executor + long procId = procExec.submitProcedure( + new EnableTableProcedure(procExec.getEnvironment(), tableName, false)); + testRecoveryAndDoubleExecution(UTIL, procId, step, EnableTableState.values()); + + MasterProcedureTestingUtility.validateTableIsEnabled( + UTIL.getHBaseCluster().getMaster(), tableName); + } + + // ========================================================================== + // Test Helpers + // ========================================================================== + public static void testRecoveryAndDoubleExecution(final HBaseTestingUtility testUtil, + final long procId, final int lastStepBeforeFailover, TState[] states) throws Exception { + ProcedureExecutor procExec = + testUtil.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + ProcedureTestingUtility.waitProcedure(procExec, procId); + + for (int i = 0; i < lastStepBeforeFailover; ++i) { + LOG.info("Restart "+ i +" exec state: " + states[i]); + ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId); + ProcedureTestingUtility.restart(procExec); + ProcedureTestingUtility.waitProcedure(procExec, procId); + } + 
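Each pass through the loop above advances the procedure by one persisted step: the callers set the kill-before-store-update (and toggle) flags, so the executor aborts itself around a store update, and restart() rebuilds it from the procedure store. A commented restatement of one iteration, under those flag settings (illustrative only):

    ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId); // must still be mid-flight
    ProcedureTestingUtility.restart(procExec);        // simulate a crash/restart of the executor
    ProcedureTestingUtility.waitProcedure(procExec, procId); // run until it finishes or kills itself again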
ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId); + + LOG.info("Trigger master failover"); + masterFailover(testUtil); + + procExec = testUtil.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + ProcedureTestingUtility.waitProcedure(procExec, procId); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + } + + // ========================================================================== + // Master failover utils + // ========================================================================== + public static void masterFailover(final HBaseTestingUtility testUtil) + throws Exception { + MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster(); + + // Kill the master + HMaster oldMaster = cluster.getMaster(); + cluster.killMaster(cluster.getMaster().getServerName()); + + // Wait the secondary + waitBackupMaster(testUtil, oldMaster); + } + + public static void waitBackupMaster(final HBaseTestingUtility testUtil, + final HMaster oldMaster) throws Exception { + MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster(); + + HMaster newMaster = cluster.getMaster(); + while (newMaster == null || newMaster == oldMaster) { + Thread.sleep(250); + newMaster = cluster.getMaster(); + } + + while (!(newMaster.isActiveMaster() && newMaster.isInitialized())) { + Thread.sleep(250); + } + } + + // ========================================================================== + // Helpers + // ========================================================================== + private MasterProcedureEnv getMasterProcedureEnv() { + return getMasterProcedureExecutor().getEnvironment(); + } + + private ProcedureExecutor getMasterProcedureExecutor() { + return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + } + + private FileSystem getFileSystem() { + return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem(); + } + + private Path getRootDir() { + return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); + } + + private Path getTempDir() { + return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getTempDir(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureQueue.java new file mode 100644 index 00000000000..d22930fd877 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureQueue.java @@ -0,0 +1,433 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.master.procedure; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.ConcurrentHashMap; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.master.TableLockManager; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, SmallTests.class}) +public class TestMasterProcedureQueue { + private static final Log LOG = LogFactory.getLog(TestMasterProcedureQueue.class); + + private MasterProcedureQueue queue; + private Configuration conf; + + @Before + public void setUp() throws IOException { + conf = HBaseConfiguration.create(); + queue = new MasterProcedureQueue(conf, new TableLockManager.NullTableLockManager()); + } + + @After + public void tearDown() throws IOException { + assertEquals(0, queue.size()); + } + + /** + * Verify simple create/insert/fetch/delete of the table queue. + */ + @Test + public void testSimpleTableOpsQueues() throws Exception { + final int NUM_TABLES = 10; + final int NUM_ITEMS = 10; + + int count = 0; + for (int i = 1; i <= NUM_TABLES; ++i) { + TableName tableName = TableName.valueOf(String.format("test-%04d", i)); + // insert items + for (int j = 1; j <= NUM_ITEMS; ++j) { + queue.addBack(new TestTableProcedure(i * 1000 + j, tableName, + TableProcedureInterface.TableOperationType.EDIT)); + assertEquals(++count, queue.size()); + } + } + assertEquals(NUM_TABLES * NUM_ITEMS, queue.size()); + + for (int j = 1; j <= NUM_ITEMS; ++j) { + for (int i = 1; i <= NUM_TABLES; ++i) { + Long procId = queue.poll(); + assertEquals(--count, queue.size()); + assertEquals(i * 1000 + j, procId.longValue()); + } + } + assertEquals(0, queue.size()); + + for (int i = 1; i <= NUM_TABLES; ++i) { + TableName tableName = TableName.valueOf(String.format("test-%04d", i)); + // complete the table deletion + assertTrue(queue.markTableAsDeleted(tableName)); + } + } + + /** + * Check that the table queue is not deletable until every procedure + * in-progress is completed (this is a special case for write-locks). 
+ */ + @Test + public void testCreateDeleteTableOperationsWithWriteLock() throws Exception { + TableName tableName = TableName.valueOf("testtb"); + + queue.addBack(new TestTableProcedure(1, tableName, + TableProcedureInterface.TableOperationType.EDIT)); + + // table can't be deleted because one item is in the queue + assertFalse(queue.markTableAsDeleted(tableName)); + + // fetch item and take a lock + assertEquals(1, queue.poll().longValue()); + // take the xlock + assertTrue(queue.tryAcquireTableWrite(tableName, "write")); + // table can't be deleted because we have the lock + assertEquals(0, queue.size()); + assertFalse(queue.markTableAsDeleted(tableName)); + // release the xlock + queue.releaseTableWrite(tableName); + // complete the table deletion + assertTrue(queue.markTableAsDeleted(tableName)); + } + + /** + * Check that the table queue is not deletable until every procedure + * in-progress is completed (this is a special case for read-locks). + */ + @Test + public void testCreateDeleteTableOperationsWithReadLock() throws Exception { + final TableName tableName = TableName.valueOf("testtb"); + final int nitems = 2; + + for (int i = 1; i <= nitems; ++i) { + queue.addBack(new TestTableProcedure(i, tableName, + TableProcedureInterface.TableOperationType.READ)); + } + + // table can't be deleted because one item is in the queue + assertFalse(queue.markTableAsDeleted(tableName)); + + for (int i = 1; i <= nitems; ++i) { + // fetch item and take a lock + assertEquals(i, queue.poll().longValue()); + // take the rlock + assertTrue(queue.tryAcquireTableRead(tableName, "read " + i)); + // table can't be deleted because we have locks and/or items in the queue + assertFalse(queue.markTableAsDeleted(tableName)); + } + + for (int i = 1; i <= nitems; ++i) { + // table can't be deleted because we have locks + assertFalse(queue.markTableAsDeleted(tableName)); + // release the rlock + queue.releaseTableRead(tableName); + } + + // there are no items and no lock in the queeu + assertEquals(0, queue.size()); + // complete the table deletion + assertTrue(queue.markTableAsDeleted(tableName)); + } + + /** + * Verify the correct logic of RWLocks on the queue + */ + @Test + public void testVerifyRwLocks() throws Exception { + TableName tableName = TableName.valueOf("testtb"); + queue.addBack(new TestTableProcedure(1, tableName, + TableProcedureInterface.TableOperationType.EDIT)); + queue.addBack(new TestTableProcedure(2, tableName, + TableProcedureInterface.TableOperationType.READ)); + queue.addBack(new TestTableProcedure(3, tableName, + TableProcedureInterface.TableOperationType.EDIT)); + queue.addBack(new TestTableProcedure(4, tableName, + TableProcedureInterface.TableOperationType.READ)); + queue.addBack(new TestTableProcedure(5, tableName, + TableProcedureInterface.TableOperationType.READ)); + + // Fetch the 1st item and take the write lock + Long procId = queue.poll(); + assertEquals(1, procId.longValue()); + assertEquals(true, queue.tryAcquireTableWrite(tableName, "write " + procId)); + + // Fetch the 2nd item and verify that the lock can't be acquired + assertEquals(null, queue.poll()); + + // Release the write lock and acquire the read lock + queue.releaseTableWrite(tableName); + + // Fetch the 2nd item and take the read lock + procId = queue.poll(); + assertEquals(2, procId.longValue()); + assertEquals(true, queue.tryAcquireTableRead(tableName, "read " + procId)); + + // Fetch the 3rd item and verify that the lock can't be acquired + procId = queue.poll(); + assertEquals(3, procId.longValue()); + 
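The rule these queue tests keep returning to: markTableAsDeleted() succeeds only once the table's queue is empty and no read or write locks are outstanding. In sketch form, against the same queue fixture (illustrative only; procId 99 is an arbitrary example):

    queue.addBack(new TestTableProcedure(99, tableName,
        TableProcedureInterface.TableOperationType.EDIT));
    assertFalse(queue.markTableAsDeleted(tableName));  // queued item -> not deletable
    assertEquals(99, queue.poll().longValue());
    assertTrue(queue.tryAcquireTableWrite(tableName, "write"));
    assertFalse(queue.markTableAsDeleted(tableName));  // held xlock -> still not deletable
    queue.releaseTableWrite(tableName);
    assertTrue(queue.markTableAsDeleted(tableName));   // empty and unlocked -> queue can be removed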
assertEquals(false, queue.tryAcquireTableWrite(tableName, "write " + procId)); + + // release the rdlock of item 2 and take the wrlock for the 3d item + queue.releaseTableRead(tableName); + assertEquals(true, queue.tryAcquireTableWrite(tableName, "write " + procId)); + + // Fetch 4th item and verify that the lock can't be acquired + assertEquals(null, queue.poll()); + + // Release the write lock and acquire the read lock + queue.releaseTableWrite(tableName); + + // Fetch the 4th item and take the read lock + procId = queue.poll(); + assertEquals(4, procId.longValue()); + assertEquals(true, queue.tryAcquireTableRead(tableName, "read " + procId)); + + // Fetch the 4th item and take the read lock + procId = queue.poll(); + assertEquals(5, procId.longValue()); + assertEquals(true, queue.tryAcquireTableRead(tableName, "read " + procId)); + + // Release 4th and 5th read-lock + queue.releaseTableRead(tableName); + queue.releaseTableRead(tableName); + + // remove table queue + assertEquals(0, queue.size()); + assertTrue("queue should be deleted", queue.markTableAsDeleted(tableName)); + } + + /** + * Verify that "write" operations for a single table are serialized, + * but different tables can be executed in parallel. + */ + @Test(timeout=90000) + public void testConcurrentWriteOps() throws Exception { + final TestTableProcSet procSet = new TestTableProcSet(queue); + + final int NUM_ITEMS = 10; + final int NUM_TABLES = 4; + final AtomicInteger opsCount = new AtomicInteger(0); + for (int i = 0; i < NUM_TABLES; ++i) { + TableName tableName = TableName.valueOf(String.format("testtb-%04d", i)); + for (int j = 1; j < NUM_ITEMS; ++j) { + procSet.addBack(new TestTableProcedure(i * 100 + j, tableName, + TableProcedureInterface.TableOperationType.EDIT)); + opsCount.incrementAndGet(); + } + } + assertEquals(opsCount.get(), queue.size()); + + final Thread[] threads = new Thread[NUM_TABLES * 2]; + final HashSet concurrentTables = new HashSet(); + final ArrayList failures = new ArrayList(); + final AtomicInteger concurrentCount = new AtomicInteger(0); + for (int i = 0; i < threads.length; ++i) { + threads[i] = new Thread() { + @Override + public void run() { + while (opsCount.get() > 0) { + try { + TableProcedureInterface proc = procSet.acquire(); + if (proc == null) { + queue.signalAll(); + if (opsCount.get() > 0) { + continue; + } + break; + } + synchronized (concurrentTables) { + assertTrue("unexpected concurrency on " + proc.getTableName(), + concurrentTables.add(proc.getTableName())); + } + assertTrue(opsCount.decrementAndGet() >= 0); + try { + long procId = ((Procedure)proc).getProcId(); + TableName tableId = proc.getTableName(); + int concurrent = concurrentCount.incrementAndGet(); + assertTrue("inc-concurrent="+ concurrent +" 1 <= concurrent <= "+ NUM_TABLES, + concurrent >= 1 && concurrent <= NUM_TABLES); + LOG.debug("[S] tableId="+ tableId +" procId="+ procId +" concurrent="+ concurrent); + Thread.sleep(2000); + concurrent = concurrentCount.decrementAndGet(); + LOG.debug("[E] tableId="+ tableId +" procId="+ procId +" concurrent="+ concurrent); + assertTrue("dec-concurrent=" + concurrent, concurrent < NUM_TABLES); + } finally { + synchronized (concurrentTables) { + assertTrue(concurrentTables.remove(proc.getTableName())); + } + procSet.release(proc); + } + } catch (Throwable e) { + LOG.error("Failed " + e.getMessage(), e); + synchronized (failures) { + failures.add(e.getMessage()); + } + } finally { + queue.signalAll(); + } + } + } + }; + threads[i].start(); + } + for (int i = 0; i < threads.length; 
++i) { + threads[i].join(); + } + assertTrue(failures.toString(), failures.isEmpty()); + assertEquals(0, opsCount.get()); + assertEquals(0, queue.size()); + + for (int i = 1; i <= NUM_TABLES; ++i) { + TableName table = TableName.valueOf(String.format("testtb-%04d", i)); + assertTrue("queue should be deleted, table=" + table, queue.markTableAsDeleted(table)); + } + } + + public static class TestTableProcSet { + private final MasterProcedureQueue queue; + private Map procsMap = + new ConcurrentHashMap(); + + public TestTableProcSet(final MasterProcedureQueue queue) { + this.queue = queue; + } + + public void addBack(TableProcedureInterface tableProc) { + Procedure proc = (Procedure)tableProc; + procsMap.put(proc.getProcId(), tableProc); + queue.addBack(proc); + } + + public void addFront(TableProcedureInterface tableProc) { + Procedure proc = (Procedure)tableProc; + procsMap.put(proc.getProcId(), tableProc); + queue.addFront(proc); + } + + public TableProcedureInterface acquire() { + TableProcedureInterface proc = null; + boolean avail = false; + while (!avail) { + Long procId = queue.poll(); + proc = procId != null ? procsMap.remove(procId) : null; + if (proc == null) break; + switch (proc.getTableOperationType()) { + case CREATE: + case DELETE: + case EDIT: + avail = queue.tryAcquireTableWrite(proc.getTableName(), + "op="+ proc.getTableOperationType()); + break; + case READ: + avail = queue.tryAcquireTableRead(proc.getTableName(), + "op="+ proc.getTableOperationType()); + break; + } + if (!avail) { + addFront(proc); + LOG.debug("yield procId=" + procId); + } + } + return proc; + } + + public void release(TableProcedureInterface proc) { + switch (proc.getTableOperationType()) { + case CREATE: + case DELETE: + case EDIT: + queue.releaseTableWrite(proc.getTableName()); + break; + case READ: + queue.releaseTableRead(proc.getTableName()); + break; + } + } + } + + public static class TestTableProcedure extends Procedure + implements TableProcedureInterface { + private final TableOperationType opType; + private final TableName tableName; + + public TestTableProcedure() { + throw new UnsupportedOperationException("recovery should not be triggered here"); + } + + public TestTableProcedure(long procId, TableName tableName, TableOperationType opType) { + this.tableName = tableName; + this.opType = opType; + setProcId(procId); + } + + @Override + public TableName getTableName() { + return tableName; + } + + @Override + public TableOperationType getTableOperationType() { + return opType; + } + + @Override + protected Procedure[] execute(Void env) { + return null; + } + + @Override + protected void rollback(Void env) { + throw new UnsupportedOperationException(); + } + + @Override + protected boolean abort(Void env) { + throw new UnsupportedOperationException(); + } + + @Override + protected void serializeStateData(final OutputStream stream) throws IOException {} + + @Override + protected void deserializeStateData(final InputStream stream) throws IOException {} + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyColumnFamilyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyColumnFamilyProcedure.java new file mode 100644 index 00000000000..d29ea256422 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyColumnFamilyProcedure.java @@ -0,0 +1,238 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import static org.junit.Assert.assertTrue; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.InvalidFamilyOperationException; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureResult; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyColumnFamilyState; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({MasterTests.class, MediumTests.class}) +public class TestModifyColumnFamilyProcedure { + private static final Log LOG = LogFactory.getLog(TestModifyColumnFamilyProcedure.class); + + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private static void setupConf(Configuration conf) { + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + } + + @BeforeClass + public static void setupCluster() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(1); + } + + @AfterClass + public static void cleanupTest() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + @Before + public void setup() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + } + + @After + public void tearDown() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + UTIL.deleteTable(htd.getTableName()); + } + } + + @Test(timeout = 60000) + public void testModifyColumnFamily() throws Exception { + final TableName tableName = TableName.valueOf("testModifyColumnFamily"); + final String cf1 = "cf1"; + final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf1); + int oldBlockSize = columnDescriptor.getBlocksize(); + int newBlockSize = 3 * oldBlockSize; + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, cf1, "f2"); + + // Test 1: modify 
the column family online + columnDescriptor.setBlocksize(newBlockSize); + long procId1 = procExec.submitProcedure(new ModifyColumnFamilyProcedure( + procExec.getEnvironment(), tableName, columnDescriptor)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId1); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); + MasterProcedureTestingUtility.validateColumnFamilyModification(UTIL.getHBaseCluster() + .getMaster(), tableName, cf1, columnDescriptor); + + // Test 2: modify the column family offline + UTIL.getHBaseAdmin().disableTable(tableName); + columnDescriptor.setBlocksize(newBlockSize * 2); + long procId2 = + procExec.submitProcedure(new ModifyColumnFamilyProcedure(procExec.getEnvironment(), + tableName, columnDescriptor)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId2); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId2); + MasterProcedureTestingUtility.validateColumnFamilyModification(UTIL.getHBaseCluster() + .getMaster(), tableName, cf1, columnDescriptor); + } + + @Test(timeout=60000) + public void testModifyNonExistingColumnFamily() throws Exception { + final TableName tableName = TableName.valueOf("testModifyExistingColumnFamily"); + final String cf2 = "cf2"; + final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf2); + int oldBlockSize = columnDescriptor.getBlocksize(); + int newBlockSize = 2 * oldBlockSize; + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1"); + + // Modify the column family that does not exist + columnDescriptor.setBlocksize(newBlockSize); + long procId1 = procExec.submitProcedure(new ModifyColumnFamilyProcedure( + procExec.getEnvironment(), tableName, columnDescriptor)); + // Wait the completion + ProcedureTestingUtility.waitProcedure(procExec, procId1); + + ProcedureResult result = procExec.getResult(procId1); + assertTrue(result.isFailed()); + LOG.debug("Modify failed with exception: " + result.getException()); + assertTrue(result.getException().getCause() instanceof InvalidFamilyOperationException); + } + + @Test(timeout=60000) + public void testRecoveryAndDoubleExecutionOffline() throws Exception { + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOffline"); + final String cf3 = "cf3"; + final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf3); + int oldBlockSize = columnDescriptor.getBlocksize(); + int newBlockSize = 4 * oldBlockSize; + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", cf3); + UTIL.getHBaseAdmin().disableTable(tableName); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Modify procedure && kill the executor + columnDescriptor.setBlocksize(newBlockSize); + long procId = procExec.submitProcedure(new ModifyColumnFamilyProcedure( + procExec.getEnvironment(), tableName, columnDescriptor)); + + // Restart the executor and execute the step twice + int numberOfSteps = ModifyColumnFamilyState.values().length; + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution( + procExec, + procId, + numberOfSteps, + ModifyColumnFamilyState.values()); + + MasterProcedureTestingUtility.validateColumnFamilyModification(UTIL.getHBaseCluster() + .getMaster(), tableName, cf3, columnDescriptor); 
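Unlike TestDeleteTableProcedure, which hardcodes the step count (see the NOTE about the hardcoded 6), the tests in this file derive it from the state enum, so they keep passing when a state is added; the rollback variant below stops two states short so the procedure fails mid-flight and must roll back. In sketch form, using the same helpers as the surrounding tests (illustrative only):

    // Replay every state: derive the count from the enum instead of hardcoding it.
    int numberOfSteps = ModifyColumnFamilyState.values().length;
    MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(
        procExec, procId, numberOfSteps, ModifyColumnFamilyState.values());

    // Rollback variant: stop short of the final states so the procedure aborts and rolls back.
    int rollbackSteps = ModifyColumnFamilyState.values().length - 2;
    MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
        procExec, procId, rollbackSteps, ModifyColumnFamilyState.values());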
+ } + + @Test(timeout = 60000) + public void testRecoveryAndDoubleExecutionOnline() throws Exception { + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline"); + final String cf4 = "cf4"; + final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf4); + int oldBlockSize = columnDescriptor.getBlocksize(); + int newBlockSize = 4 * oldBlockSize; + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", cf4); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Modify procedure && kill the executor + columnDescriptor.setBlocksize(newBlockSize); + long procId = + procExec.submitProcedure(new ModifyColumnFamilyProcedure(procExec.getEnvironment(), + tableName, columnDescriptor)); + + // Restart the executor and execute the step twice + int numberOfSteps = ModifyColumnFamilyState.values().length; + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps, + ModifyColumnFamilyState.values()); + + MasterProcedureTestingUtility.validateColumnFamilyModification(UTIL.getHBaseCluster() + .getMaster(), tableName, cf4, columnDescriptor); + } + + @Test(timeout = 60000) + public void testRollbackAndDoubleExecution() throws Exception { + final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution"); + final String cf3 = "cf3"; + final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf3); + int oldBlockSize = columnDescriptor.getBlocksize(); + int newBlockSize = 4 * oldBlockSize; + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", cf3); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Modify procedure && kill the executor + columnDescriptor.setBlocksize(newBlockSize); + long procId = procExec.submitProcedure(new ModifyColumnFamilyProcedure( + procExec.getEnvironment(), tableName, columnDescriptor)); + + // Failing in the middle of proc + int numberOfSteps = ModifyColumnFamilyState.values().length - 2; + MasterProcedureTestingUtility.testRollbackAndDoubleExecution( + procExec, + procId, + numberOfSteps, + ModifyColumnFamilyState.values()); + } + + private ProcedureExecutor getMasterProcedureExecutor() { + return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java new file mode 100644 index 00000000000..af29338a735 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java @@ -0,0 +1,403 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.ModifyTableState; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({MasterTests.class, MediumTests.class}) +public class TestModifyTableProcedure { + private static final Log LOG = LogFactory.getLog(TestModifyTableProcedure.class); + + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private static void setupConf(Configuration conf) { + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + } + + @BeforeClass + public static void setupCluster() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(1); + } + + @AfterClass + public static void cleanupTest() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + @Before + public void setup() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + } + + @After + public void tearDown() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + UTIL.deleteTable(htd.getTableName()); + } + } + + @Test(timeout=60000) + public void testModifyTable() throws Exception { + final TableName tableName = TableName.valueOf("testModifyTable"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf"); + UTIL.getHBaseAdmin().disableTable(tableName); + + // Modify the table descriptor + HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName)); + + // Test 1: Modify 1 property + long newMaxFileSize = htd.getMaxFileSize() * 2; + htd.setMaxFileSize(newMaxFileSize); + htd.setRegionReplication(3); + + long procId1 = ProcedureTestingUtility.submitAndWait( + procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd)); + 
ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId1)); + + HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName); + assertEquals(newMaxFileSize, currentHtd.getMaxFileSize()); + + // Test 2: Modify multiple properties + boolean newReadOnlyOption = htd.isReadOnly() ? false : true; + long newMemStoreFlushSize = htd.getMemStoreFlushSize() * 2; + htd.setReadOnly(newReadOnlyOption); + htd.setMemStoreFlushSize(newMemStoreFlushSize); + + long procId2 = ProcedureTestingUtility.submitAndWait( + procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd)); + ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2)); + + currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName); + assertEquals(newReadOnlyOption, currentHtd.isReadOnly()); + assertEquals(newMemStoreFlushSize, currentHtd.getMemStoreFlushSize()); + } + + @Test(timeout = 60000) + public void testModifyTableAddCF() throws Exception { + final TableName tableName = TableName.valueOf("testModifyTableAddCF"); + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf1"); + HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName); + assertEquals(1, currentHtd.getFamiliesKeys().size()); + + // Test 1: Modify the table descriptor online + String cf2 = "cf2"; + HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName)); + htd.addFamily(new HColumnDescriptor(cf2)); + + long procId = ProcedureTestingUtility.submitAndWait( + procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd)); + ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId)); + + currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName); + assertEquals(2, currentHtd.getFamiliesKeys().size()); + assertTrue(currentHtd.hasFamily(cf2.getBytes())); + + // Test 2: Modify the table descriptor offline + UTIL.getHBaseAdmin().disableTable(tableName); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + String cf3 = "cf3"; + HTableDescriptor htd2 = + new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName)); + htd2.addFamily(new HColumnDescriptor(cf3)); + + long procId2 = + ProcedureTestingUtility.submitAndWait(procExec, + new ModifyTableProcedure(procExec.getEnvironment(), htd2)); + ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2)); + + currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName); + assertTrue(currentHtd.hasFamily(cf3.getBytes())); + assertEquals(3, currentHtd.getFamiliesKeys().size()); + } + + @Test(timeout = 60000) + public void testModifyTableDeleteCF() throws Exception { + final TableName tableName = TableName.valueOf("testModifyTableDeleteCF"); + final String cf2 = "cf2"; + final String cf3 = "cf3"; + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf1", cf2, cf3); + HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName); + assertEquals(3, currentHtd.getFamiliesKeys().size()); + + // Test 1: Modify the table descriptor + HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName)); + htd.removeFamily(cf2.getBytes()); + + long procId = ProcedureTestingUtility.submitAndWait( + procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd)); + ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId)); + + currentHtd =
UTIL.getHBaseAdmin().getTableDescriptor(tableName); + assertEquals(2, currentHtd.getFamiliesKeys().size()); + assertFalse(currentHtd.hasFamily(cf2.getBytes())); + + // Test 2: Modify the table descriptor offline + UTIL.getHBaseAdmin().disableTable(tableName); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + + HTableDescriptor htd2 = + new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName)); + htd2.removeFamily(cf3.getBytes()); + + long procId2 = + ProcedureTestingUtility.submitAndWait(procExec, + new ModifyTableProcedure(procExec.getEnvironment(), htd2)); + ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2)); + + currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName); + assertEquals(1, currentHtd.getFamiliesKeys().size()); + assertFalse(currentHtd.hasFamily(cf3.getBytes())); + } + + @Test(timeout=60000) + public void testRecoveryAndDoubleExecutionOffline() throws Exception { + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOffline"); + final String cf2 = "cf2"; + final String cf3 = "cf3"; + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, "cf1", cf3); + UTIL.getHBaseAdmin().disableTable(tableName); + + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Modify multiple properties of the table. + HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName)); + boolean newCompactionEnableOption = htd.isCompactionEnabled() ? false : true; + htd.setCompactionEnabled(newCompactionEnableOption); + htd.addFamily(new HColumnDescriptor(cf2)); + htd.removeFamily(cf3.getBytes()); + htd.setRegionReplication(3); + + // Start the Modify procedure && kill the executor + long procId = + procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), htd)); + + // Restart the executor and execute the step twice + int numberOfSteps = ModifyTableState.values().length; + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution( + procExec, + procId, + numberOfSteps, + ModifyTableState.values()); + + // Validate descriptor + HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName); + assertEquals(newCompactionEnableOption, currentHtd.isCompactionEnabled()); + assertEquals(2, currentHtd.getFamiliesKeys().size()); + + // cf2 should be added cf3 should be removed + MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(), + tableName, regions, false, "cf1", cf2); + } + + @Test(timeout = 60000) + public void testRecoveryAndDoubleExecutionOnline() throws Exception { + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline"); + final String cf2 = "cf2"; + final String cf3 = "cf3"; + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, "cf1", cf3); + + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Modify multiple properties of the table. + HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName)); + boolean newCompactionEnableOption = htd.isCompactionEnabled() ? 
false : true; + htd.setCompactionEnabled(newCompactionEnableOption); + htd.addFamily(new HColumnDescriptor(cf2)); + htd.removeFamily(cf3.getBytes()); + + // Start the Modify procedure && kill the executor + long procId = + procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), htd)); + + // Restart the executor and execute the step twice + int numberOfSteps = ModifyTableState.values().length; + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps, + ModifyTableState.values()); + + // Validate descriptor + HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName); + assertEquals(newCompactionEnableOption, currentHtd.isCompactionEnabled()); + assertEquals(2, currentHtd.getFamiliesKeys().size()); + assertTrue(currentHtd.hasFamily(cf2.getBytes())); + assertFalse(currentHtd.hasFamily(cf3.getBytes())); + + // cf2 should be added cf3 should be removed + MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(), + tableName, regions, "cf1", cf2); + } + + @Test(timeout = 60000) + public void testRollbackAndDoubleExecutionOnline() throws Exception { + final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution"); + final String familyName = "cf2"; + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, "cf1"); + + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName)); + boolean newCompactionEnableOption = htd.isCompactionEnabled() ? false : true; + htd.setCompactionEnabled(newCompactionEnableOption); + htd.addFamily(new HColumnDescriptor(familyName)); + + // Start the Modify procedure && kill the executor + long procId = + procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), htd)); + + // Restart the executor and rollback the step twice + int numberOfSteps = ModifyTableState.values().length - 4; // failing in the middle of proc + MasterProcedureTestingUtility.testRollbackAndDoubleExecution( + procExec, + procId, + numberOfSteps, + ModifyTableState.values()); + + // cf2 should not be present + MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(), + tableName, regions, "cf1"); + } + + @Test(timeout = 60000) + public void testRollbackAndDoubleExecutionOffline() throws Exception { + final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution"); + final String familyName = "cf2"; + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, "cf1"); + UTIL.getHBaseAdmin().disableTable(tableName); + + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName)); + boolean newCompactionEnableOption = htd.isCompactionEnabled() ? 
false : true; + htd.setCompactionEnabled(newCompactionEnableOption); + htd.addFamily(new HColumnDescriptor(familyName)); + htd.setRegionReplication(3); + + // Start the Modify procedure && kill the executor + long procId = + procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), htd)); + + // Restart the executor and rollback the step twice + int numberOfSteps = ModifyTableState.values().length - 4; // failing in the middle of proc + MasterProcedureTestingUtility.testRollbackAndDoubleExecution( + procExec, + procId, + numberOfSteps, + ModifyTableState.values()); + + // cf2 should not be present + MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(), + tableName, regions, "cf1"); + } + + @Test(timeout = 60000) + public void testRollbackAndDoubleExecutionAfterPONR() throws Exception { + final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionAfterPONR"); + final String familyToAddName = "cf2"; + final String familyToRemove = "cf1"; + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + + // create the table + HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + procExec, tableName, null, familyToRemove); + UTIL.getHBaseAdmin().disableTable(tableName); + + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName)); + htd.setCompactionEnabled(!htd.isCompactionEnabled()); + htd.addFamily(new HColumnDescriptor(familyToAddName)); + htd.removeFamily(familyToRemove.getBytes()); + htd.setRegionReplication(3); + + // Start the Modify procedure && kill the executor + long procId = + procExec.submitProcedure(new ModifyTableProcedure(procExec.getEnvironment(), htd)); + + // Failing after MODIFY_TABLE_DELETE_FS_LAYOUT we should not trigger the rollback. + // NOTE: the 5 (number of MODIFY_TABLE_DELETE_FS_LAYOUT + 1 step) is hardcoded, + // so you have to look at this test at least once when you add a new step. + int numberOfSteps = 5; + MasterProcedureTestingUtility.testRollbackAndDoubleExecutionAfterPONR( + procExec, + procId, + numberOfSteps, + ModifyTableState.values()); + + // "cf2" should be added and "cf1" should be removed + MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(), + tableName, regions, false, familyToAddName); + } + + private ProcedureExecutor getMasterProcedureExecutor() { + return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java new file mode 100644 index 00000000000..58acbaedd41 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java @@ -0,0 +1,246 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.master.procedure; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotDisabledException; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.procedure2.ProcedureResult; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableState; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, MediumTests.class}) +public class TestTruncateTableProcedure { + private static final Log LOG = LogFactory.getLog(TestTruncateTableProcedure.class); + + protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); + + private static void setupConf(Configuration conf) { + conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1); + } + + @BeforeClass + public static void setupCluster() throws Exception { + setupConf(UTIL.getConfiguration()); + UTIL.startMiniCluster(1); + } + + @AfterClass + public static void cleanupTest() throws Exception { + try { + UTIL.shutdownMiniCluster(); + } catch (Exception e) { + LOG.warn("failure shutting down cluster", e); + } + } + + @Before + public void setup() throws Exception { + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false); + assertTrue("expected executor to be running", procExec.isRunning()); + } + + @After + public void tearDown() throws Exception { + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(getMasterProcedureExecutor(), false); + for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) { + LOG.info("Tear down, remove table=" + htd.getTableName()); + UTIL.deleteTable(htd.getTableName()); + } + } + + @Test(timeout=60000) + public void testTruncateNotExistentTable() throws Exception { + final TableName tableName = TableName.valueOf("testTruncateNotExistentTable"); + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + long procId = ProcedureTestingUtility.submitAndWait(procExec, + new TruncateTableProcedure(procExec.getEnvironment(), tableName, true)); + + // Truncating a non-existent table should fail with TableNotFoundException + ProcedureResult result = procExec.getResult(procId); + 
assertTrue(result.isFailed()); + LOG.debug("Truncate failed with exception: " + result.getException()); + assertTrue(result.getException().getCause() instanceof TableNotFoundException); + } + + @Test(timeout=60000) + public void testTruncateNotDisabledTable() throws Exception { + final TableName tableName = TableName.valueOf("testTruncateNotDisabledTable"); + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f"); + + long procId = ProcedureTestingUtility.submitAndWait(procExec, + new TruncateTableProcedure(procExec.getEnvironment(), tableName, false)); + + // Truncating an enabled table should fail with TableNotDisabledException + ProcedureResult result = procExec.getResult(procId); + assertTrue(result.isFailed()); + LOG.debug("Truncate failed with exception: " + result.getException()); + assertTrue(result.getException().getCause() instanceof TableNotDisabledException); + } + + @Test(timeout=60000) + public void testSimpleTruncatePreserveSplits() throws Exception { + final TableName tableName = TableName.valueOf("testSimpleTruncatePreserveSplits"); + testSimpleTruncate(tableName, true); + } + + @Test(timeout=60000) + public void testSimpleTruncateNoPreserveSplits() throws Exception { + final TableName tableName = TableName.valueOf("testSimpleTruncateNoPreserveSplits"); + testSimpleTruncate(tableName, false); + } + + private void testSimpleTruncate(final TableName tableName, final boolean preserveSplits) + throws Exception { + final String[] families = new String[] { "f1", "f2" }; + final byte[][] splitKeys = new byte[][] { + Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") + }; + + HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + getMasterProcedureExecutor(), tableName, splitKeys, families); + // load and verify that there are rows in the table + MasterProcedureTestingUtility.loadData( + UTIL.getConnection(), tableName, 100, splitKeys, families); + assertEquals(100, UTIL.countRows(tableName)); + // disable the table + UTIL.getHBaseAdmin().disableTable(tableName); + + // truncate the table + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + long procId = ProcedureTestingUtility.submitAndWait(procExec, + new TruncateTableProcedure(procExec.getEnvironment(), tableName, preserveSplits)); + ProcedureTestingUtility.assertProcNotFailed(procExec, procId); + + UTIL.waitUntilAllRegionsAssigned(tableName); + + // validate the table regions and layout + if (preserveSplits) { + assertEquals(1 + splitKeys.length, UTIL.getHBaseAdmin().getTableRegions(tableName).size()); + } else { + regions = UTIL.getHBaseAdmin().getTableRegions(tableName).toArray(new HRegionInfo[1]); + assertEquals(1, regions.length); + } + MasterProcedureTestingUtility.validateTableCreation( + UTIL.getHBaseCluster().getMaster(), tableName, regions, families); + + // verify that there are no rows in the table + assertEquals(0, UTIL.countRows(tableName)); + + // verify that the table is read/writable + MasterProcedureTestingUtility.loadData( + UTIL.getConnection(), tableName, 50, splitKeys, families); + assertEquals(50, UTIL.countRows(tableName)); + } + + @Test(timeout=60000) + public void testRecoveryAndDoubleExecutionPreserveSplits() throws Exception { + final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionPreserveSplits"); + testRecoveryAndDoubleExecution(tableName, true); + } + + @Test(timeout=60000) + public void testRecoveryAndDoubleExecutionNoPreserveSplits() throws Exception { + final TableName 
tableName = TableName.valueOf("testRecoveryAndDoubleExecutionNoPreserveSplits"); + testRecoveryAndDoubleExecution(tableName, false); + } + + private void testRecoveryAndDoubleExecution(final TableName tableName, + final boolean preserveSplits) throws Exception { + final String[] families = new String[] { "f1", "f2" }; + + // create the table + final byte[][] splitKeys = new byte[][] { + Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c") + }; + HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( + getMasterProcedureExecutor(), tableName, splitKeys, families); + // load and verify that there are rows in the table + MasterProcedureTestingUtility.loadData( + UTIL.getConnection(), tableName, 100, splitKeys, families); + assertEquals(100, UTIL.countRows(tableName)); + // disable the table + UTIL.getHBaseAdmin().disableTable(tableName); + + final ProcedureExecutor procExec = getMasterProcedureExecutor(); + ProcedureTestingUtility.waitNoProcedureRunning(procExec); + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true); + + // Start the Truncate procedure && kill the executor + long procId = procExec.submitProcedure( + new TruncateTableProcedure(procExec.getEnvironment(), tableName, preserveSplits)); + + // Restart the executor and execute the step twice + // NOTE: the 7 (number of TruncateTableState steps) is hardcoded, + // so you have to look at this test at least once when you add a new step. + MasterProcedureTestingUtility.testRecoveryAndDoubleExecution( + procExec, procId, 7, TruncateTableState.values()); + + ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false); + UTIL.waitUntilAllRegionsAssigned(tableName); + + // validate the table regions and layout + if (preserveSplits) { + assertEquals(1 + splitKeys.length, UTIL.getHBaseAdmin().getTableRegions(tableName).size()); + } else { + regions = UTIL.getHBaseAdmin().getTableRegions(tableName).toArray(new HRegionInfo[1]); + assertEquals(1, regions.length); + } + MasterProcedureTestingUtility.validateTableCreation( + UTIL.getHBaseCluster().getMaster(), tableName, regions, families); + + // verify that there are no rows in the table + assertEquals(0, UTIL.countRows(tableName)); + + // verify that the table is read/writable + MasterProcedureTestingUtility.loadData( + UTIL.getConnection(), tableName, 50, splitKeys, families); + assertEquals(50, UTIL.countRows(tableName)); + } + + private ProcedureExecutor getMasterProcedureExecutor() { + return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java index 5e28cd96f5c..bc3354bc91b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/MobTestUtil.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.mob; import java.io.IOException; import java.util.Random; +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.regionserver.StoreFile; @@ -70,10 +71,10 @@ public class MobTestUtil { } /** - * Compare two KeyValue only for their row family qualifier value + * Compare two Cells only for their row family qualifier value */ - public static void assertKeyValuesEquals(KeyValue firstKeyValue, - KeyValue secondKeyValue) { + public static void assertCellEquals(Cell firstKeyValue, + Cell 
secondKeyValue) { Assert.assertEquals(Bytes.toString(CellUtil.cloneRow(firstKeyValue)), Bytes.toString(CellUtil.cloneRow(secondKeyValue))); Assert.assertEquals(Bytes.toString(CellUtil.cloneFamily(firstKeyValue)), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java index e0e95415d0f..b38e7cb2add 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java @@ -25,10 +25,10 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; -import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; @@ -45,13 +45,13 @@ public class TestCachedMobFile extends TestCase{ static final Log LOG = LogFactory.getLog(TestCachedMobFile.class); private Configuration conf = HBaseConfiguration.create(); private CacheConfig cacheConf = new CacheConfig(conf); - private final String TABLE = "tableName"; - private final String FAMILY = "familyName"; - private final String FAMILY1 = "familyName1"; - private final String FAMILY2 = "familyName2"; - private final long EXPECTED_REFERENCE_ZERO = 0; - private final long EXPECTED_REFERENCE_ONE = 1; - private final long EXPECTED_REFERENCE_TWO = 2; + private static final String TABLE = "tableName"; + private static final String FAMILY = "familyName"; + private static final String FAMILY1 = "familyName1"; + private static final String FAMILY2 = "familyName2"; + private static final long EXPECTED_REFERENCE_ZERO = 0; + private static final long EXPECTED_REFERENCE_ONE = 1; + private static final long EXPECTED_REFERENCE_TWO = 2; @Test public void testOpenClose() throws Exception { @@ -121,34 +121,34 @@ public class TestCachedMobFile extends TestCase{ KeyValue expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey); KeyValue seekKey = expectedKey.createKeyOnly(false); - KeyValue kv = KeyValueUtil.ensureKeyValue(cachedMobFile.readCell(seekKey, false)); - MobTestUtil.assertKeyValuesEquals(expectedKey, kv); + Cell cell = cachedMobFile.readCell(seekKey, false); + MobTestUtil.assertCellEquals(expectedKey, cell); // Test the end key byte[] endKey = Bytes.toBytes("zz"); // The end key bytes expectedKey = new KeyValue(endKey, family, qualify, Long.MAX_VALUE, Type.Put, endKey); seekKey = expectedKey.createKeyOnly(false); - kv = KeyValueUtil.ensureKeyValue(cachedMobFile.readCell(seekKey, false)); - MobTestUtil.assertKeyValuesEquals(expectedKey, kv); + cell = cachedMobFile.readCell(seekKey, false); + MobTestUtil.assertCellEquals(expectedKey, cell); // Test the random key byte[] randomKey = Bytes.toBytes(MobTestUtil.generateRandomString(2)); expectedKey = new KeyValue(randomKey, family, qualify, Long.MAX_VALUE, Type.Put, randomKey); seekKey = expectedKey.createKeyOnly(false); - kv = KeyValueUtil.ensureKeyValue(cachedMobFile.readCell(seekKey, false)); - MobTestUtil.assertKeyValuesEquals(expectedKey, kv); + cell = cachedMobFile.readCell(seekKey, false); + MobTestUtil.assertCellEquals(expectedKey, cell); // 
Test the key which is less than the start key byte[] lowerKey = Bytes.toBytes("a1"); // Smaller than "aa" expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey); seekKey = new KeyValue(lowerKey, family, qualify, Long.MAX_VALUE, Type.Put, lowerKey); - kv = KeyValueUtil.ensureKeyValue(cachedMobFile.readCell(seekKey, false)); - MobTestUtil.assertKeyValuesEquals(expectedKey, kv); + cell = cachedMobFile.readCell(seekKey, false); + MobTestUtil.assertCellEquals(expectedKey, cell); // Test the key which is more than the end key byte[] upperKey = Bytes.toBytes("z{"); // Bigger than "zz" seekKey = new KeyValue(upperKey, family, qualify, Long.MAX_VALUE, Type.Put, upperKey); - kv = KeyValueUtil.ensureKeyValue(cachedMobFile.readCell(seekKey, false)); - Assert.assertNull(kv); + cell = cachedMobFile.readCell(seekKey, false); + Assert.assertNull(cell); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java index 5e3a6957def..b91d4d1ab6a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestDefaultMobStoreFlusher.java @@ -29,12 +29,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.*; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.AfterClass; @@ -71,32 +66,33 @@ public class TestDefaultMobStoreFlusher { @Test public void testFlushNonMobFile() throws InterruptedException { String TN = "testFlushNonMobFile"; - HTable table = null; + TableName tn = TableName.valueOf(TN); + Table table = null; HBaseAdmin admin = null; try { - HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TN)); + HTableDescriptor desc = new HTableDescriptor(tn); HColumnDescriptor hcd = new HColumnDescriptor(family); hcd.setMaxVersions(4); desc.addFamily(hcd); - admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); + admin = TEST_UTIL.getHBaseAdmin(); admin.createTable(desc); - table = new HTable(TEST_UTIL.getConfiguration(), TN); + table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()) + .getTable(TableName.valueOf(TN)); //Put data Put put0 = new Put(row1); - put0.add(family, qf1, 1, value1); + put0.addColumn(family, qf1, 1, value1); table.put(put0); //Put more data Put put1 = new Put(row2); - put1.add(family, qf2, 1, value2); + put1.addColumn(family, qf2, 1, value2); table.put(put1); //Flush - table.flushCommits(); - admin.flush(TN); + admin.flush(tn); Scan scan = new Scan(); scan.addColumn(family, qf1); @@ -131,34 +127,35 @@ public class TestDefaultMobStoreFlusher { @Test public void testFlushMobFile() throws InterruptedException { String TN = "testFlushMobFile"; - HTable table = null; - HBaseAdmin admin = null; + TableName tn = TableName.valueOf(TN); + Table table = null; + Admin admin = null; try { - HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TN)); + 
HTableDescriptor desc = new HTableDescriptor(tn); HColumnDescriptor hcd = new HColumnDescriptor(family); hcd.setMobEnabled(true); hcd.setMobThreshold(3L); hcd.setMaxVersions(4); desc.addFamily(hcd); - admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); + Connection c = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + admin = c.getAdmin(); admin.createTable(desc); - table = new HTable(TEST_UTIL.getConfiguration(), TN); + table = c.getTable(TableName.valueOf(TN)); //put data Put put0 = new Put(row1); - put0.add(family, qf1, 1, value1); + put0.addColumn(family, qf1, 1, value1); table.put(put0); //put more data Put put1 = new Put(row2); - put1.add(family, qf2, 1, value2); + put1.addColumn(family, qf2, 1, value2); table.put(put1); //flush - table.flushCommits(); - admin.flush(TN); + admin.flush(tn); //Scan Scan scan = new Scan(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java index f16fa20f351..dfaeca64b8d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestExpiredMobFileCleaner.java @@ -28,11 +28,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.mob.ExpiredMobFileCleaner; -import org.apache.hadoop.hbase.mob.MobUtils; +import org.apache.hadoop.hbase.client.*; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.util.ToolRunner; @@ -53,7 +49,7 @@ public class TestExpiredMobFileCleaner { private final static byte[] row2 = Bytes.toBytes("row2"); private final static byte[] qf = Bytes.toBytes("qf"); - private static HTable table; + private static BufferedMutator table; private static Admin admin; @BeforeClass @@ -93,8 +89,8 @@ public class TestExpiredMobFileCleaner { admin = TEST_UTIL.getHBaseAdmin(); admin.createTable(desc); - table = new HTable(TEST_UTIL.getConfiguration(), tableName); - table.setAutoFlush(false, false); + table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()) + .getBufferedMutator(tableName); } private void modifyColumnExpiryDays(int expireDays) throws Exception { @@ -108,14 +104,14 @@ public class TestExpiredMobFileCleaner { admin.modifyColumn(tableName, hcd); } - private void putKVAndFlush(HTable table, byte[] row, byte[] value, long ts) + private void putKVAndFlush(BufferedMutator table, byte[] row, byte[] value, long ts) throws Exception { Put put = new Put(row, ts); - put.add(Bytes.toBytes(family), qf, value); - table.put(put); + put.addColumn(Bytes.toBytes(family), qf, value); + table.mutate(put); - table.flushCommits(); + table.flush(); admin.flush(tableName); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java index 055eac31215..15aa7d45ca2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobDataBlockEncoding.java @@ -27,12 +27,7 @@ import 
org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.*; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -51,7 +46,7 @@ public class TestMobDataBlockEncoding { private final static byte [] qf1 = Bytes.toBytes("qualifier1"); private final static byte [] qf2 = Bytes.toBytes("qualifier2"); protected final byte[] qf3 = Bytes.toBytes("qualifier3"); - private static HTable table; + private static Table table; private static HBaseAdmin admin; private static HColumnDescriptor hcd; private static HTableDescriptor desc; @@ -80,9 +75,10 @@ public class TestMobDataBlockEncoding { hcd.setMaxVersions(4); hcd.setDataBlockEncoding(encoding); desc.addFamily(hcd); - admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); + admin = TEST_UTIL.getHBaseAdmin(); admin.createTable(desc); - table = new HTable(TEST_UTIL.getConfiguration(), TN); + table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()) + .getTable(TableName.valueOf(TN)); } /** @@ -113,13 +109,11 @@ public class TestMobDataBlockEncoding { byte[] value = generateMobValue((int) defaultThreshold + 1); Put put1 = new Put(row1); - put1.add(family, qf1, ts3, value); - put1.add(family, qf2, ts2, value); - put1.add(family, qf3, ts1, value); + put1.addColumn(family, qf1, ts3, value); + put1.addColumn(family, qf2, ts2, value); + put1.addColumn(family, qf3, ts1, value); table.put(put1); - - table.flushCommits(); - admin.flush(TN); + admin.flush(TableName.valueOf(TN)); Scan scan = new Scan(); scan.setMaxVersions(4); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java index 01050aeb7d6..d05da2415be 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java @@ -25,10 +25,10 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; -import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; @@ -73,35 +73,35 @@ public class TestMobFile extends TestCase { KeyValue expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey); KeyValue seekKey = expectedKey.createKeyOnly(false); - KeyValue kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false)); - MobTestUtil.assertKeyValuesEquals(expectedKey, kv); + Cell cell = mobFile.readCell(seekKey, false); + MobTestUtil.assertCellEquals(expectedKey, cell); // Test the end key byte[] endKey = Bytes.toBytes("zz"); // The end key bytes expectedKey = new 
KeyValue(endKey, family, qualify, Long.MAX_VALUE, Type.Put, endKey); seekKey = expectedKey.createKeyOnly(false); - kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false)); - MobTestUtil.assertKeyValuesEquals(expectedKey, kv); + cell = mobFile.readCell(seekKey, false); + MobTestUtil.assertCellEquals(expectedKey, cell); // Test the random key byte[] randomKey = Bytes.toBytes(MobTestUtil.generateRandomString(2)); expectedKey = new KeyValue(randomKey, family, qualify, Long.MAX_VALUE, Type.Put, randomKey); seekKey = expectedKey.createKeyOnly(false); - kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false)); - MobTestUtil.assertKeyValuesEquals(expectedKey, kv); + cell = mobFile.readCell(seekKey, false); + MobTestUtil.assertCellEquals(expectedKey, cell); // Test the key which is less than the start key byte[] lowerKey = Bytes.toBytes("a1"); // Smaller than "aa" expectedKey = new KeyValue(startKey, family, qualify, Long.MAX_VALUE, Type.Put, startKey); seekKey = new KeyValue(lowerKey, family, qualify, Long.MAX_VALUE, Type.Put, lowerKey); - kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false)); - MobTestUtil.assertKeyValuesEquals(expectedKey, kv); + cell = mobFile.readCell(seekKey, false); + MobTestUtil.assertCellEquals(expectedKey, cell); // Test the key which is more than the end key byte[] upperKey = Bytes.toBytes("z{"); // Bigger than "zz" seekKey = new KeyValue(upperKey, family, qualify, Long.MAX_VALUE, Type.Put, upperKey); - kv = KeyValueUtil.ensureKeyValue(mobFile.readCell(seekKey, false)); - assertNull(kv); + cell = mobFile.readCell(seekKey, false); + assertNull(cell); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java index 1a809a12603..95fa1b9775c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFileCache.java @@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.regionserver.HMobStore; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.StoreFile; -import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Test; @@ -53,26 +52,26 @@ public class TestMobFileCache extends TestCase { private MobCacheConfig mobCacheConf; private MobFileCache mobFileCache; private Date currentDate = new Date(); - private final String TEST_CACHE_SIZE = "2"; - private final int EXPECTED_CACHE_SIZE_ZERO = 0; - private final int EXPECTED_CACHE_SIZE_ONE = 1; - private final int EXPECTED_CACHE_SIZE_TWO = 2; - private final int EXPECTED_CACHE_SIZE_THREE = 3; - private final long EXPECTED_REFERENCE_ONE = 1; - private final long EXPECTED_REFERENCE_TWO = 2; + private static final String TEST_CACHE_SIZE = "2"; + private static final int EXPECTED_CACHE_SIZE_ZERO = 0; + private static final int EXPECTED_CACHE_SIZE_ONE = 1; + private static final int EXPECTED_CACHE_SIZE_TWO = 2; + private static final int EXPECTED_CACHE_SIZE_THREE = 3; + private static final long EXPECTED_REFERENCE_ONE = 1; + private static final long EXPECTED_REFERENCE_TWO = 2; - private final String TABLE = "tableName"; - private final String FAMILY1 = "family1"; - private final String FAMILY2 = "family2"; - private final String FAMILY3 = "family3"; + private static final String TABLE = 
"tableName"; + private static final String FAMILY1 = "family1"; + private static final String FAMILY2 = "family2"; + private static final String FAMILY3 = "family3"; - private final byte[] ROW = Bytes.toBytes("row"); - private final byte[] ROW2 = Bytes.toBytes("row2"); - private final byte[] VALUE = Bytes.toBytes("value"); - private final byte[] VALUE2 = Bytes.toBytes("value2"); - private final byte[] QF1 = Bytes.toBytes("qf1"); - private final byte[] QF2 = Bytes.toBytes("qf2"); - private final byte[] QF3 = Bytes.toBytes("qf3"); + private static final byte[] ROW = Bytes.toBytes("row"); + private static final byte[] ROW2 = Bytes.toBytes("row2"); + private static final byte[] VALUE = Bytes.toBytes("value"); + private static final byte[] VALUE2 = Bytes.toBytes("value2"); + private static final byte[] QF1 = Bytes.toBytes("qf1"); + private static final byte[] QF2 = Bytes.toBytes("qf2"); + private static final byte[] QF3 = Bytes.toBytes("qf3"); @Override public void setUp() throws Exception { @@ -102,7 +101,6 @@ public class TestMobFileCache extends TestCase { /** * Create the mob store file. - * @param family */ private Path createMobStoreFile(String family) throws IOException { return createMobStoreFile(HBaseConfiguration.create(), family); @@ -110,26 +108,23 @@ public class TestMobFileCache extends TestCase { /** * Create the mob store file - * @param conf - * @param family */ private Path createMobStoreFile(Configuration conf, String family) throws IOException { HColumnDescriptor hcd = new HColumnDescriptor(family); hcd.setMaxVersions(4); hcd.setMobEnabled(true); mobCacheConf = new MobCacheConfig(conf, hcd); - return createMobStoreFile(conf, hcd); + return createMobStoreFile(hcd); } /** * Create the mob store file - * @param conf - * @param hcd */ - private Path createMobStoreFile(Configuration conf, HColumnDescriptor hcd) + private Path createMobStoreFile(HColumnDescriptor hcd) throws IOException { // Setting up a Store - HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE)); + TableName tn = TableName.valueOf(TABLE); + HTableDescriptor htd = new HTableDescriptor(tn); htd.addFamily(hcd); HMobStore mobStore = (HMobStore) region.getStore(hcd.getName()); KeyValue key1 = new KeyValue(ROW, hcd.getName(), QF1, 1, VALUE); @@ -137,7 +132,7 @@ public class TestMobFileCache extends TestCase { KeyValue key3 = new KeyValue(ROW2, hcd.getName(), QF3, 1, VALUE2); KeyValue[] keys = new KeyValue[] { key1, key2, key3 }; int maxKeyCount = keys.length; - HRegionInfo regionInfo = new HRegionInfo(); + HRegionInfo regionInfo = new HRegionInfo(tn); StoreFile.Writer mobWriter = mobStore.createWriterInTmp(currentDate, maxKeyCount, hcd.getCompactionCompression(), regionInfo.getStartKey()); Path mobFilePath = mobWriter.getPath(); @@ -193,7 +188,7 @@ public class TestMobFileCache extends TestCase { CachedMobFile cachedMobFile3 = (CachedMobFile) mobFileCache.openFile( fs, file3Path, mobCacheConf); // Before the evict - // Evict the cache, should clost the first file 1 + // Evict the cache, should close the first file 1 assertEquals(EXPECTED_CACHE_SIZE_THREE, mobFileCache.getCacheSize()); assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile1.getReferenceCount()); assertEquals(EXPECTED_REFERENCE_TWO, cachedMobFile2.getReferenceCount()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/filecompactions/TestMobFileCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/filecompactions/TestMobFileCompactor.java index e4cad6fd56e..ba0b620ea27 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/filecompactions/TestMobFileCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/filecompactions/TestMobFileCompactor.java @@ -42,16 +42,9 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.client.*; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Durability; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.mob.MobUtils; @@ -74,22 +67,24 @@ public class TestMobFileCompactor { private Configuration conf = null; private String tableNameAsString; private TableName tableName; - private HTable hTable; + private static Connection conn; + private BufferedMutator bufMut; + private Table hTable; private Admin admin; private HTableDescriptor desc; private HColumnDescriptor hcd1; private HColumnDescriptor hcd2; private FileSystem fs; - private final String family1 = "family1"; - private final String family2 = "family2"; - private final String qf1 = "qualifier1"; - private final String qf2 = "qualifier2"; - private byte[] KEYS = Bytes.toBytes("012"); - private int regionNum = KEYS.length; - private int delRowNum = 1; - private int delCellNum = 6; - private int cellNumPerRow = 3; - private int rowNumPerFile = 2; + private static final String family1 = "family1"; + private static final String family2 = "family2"; + private static final String qf1 = "qualifier1"; + private static final String qf2 = "qualifier2"; + private static byte[] KEYS = Bytes.toBytes("012"); + private static int regionNum = KEYS.length; + private static int delRowNum = 1; + private static int delCellNum = 6; + private static int cellNumPerRow = 3; + private static int rowNumPerFile = 2; private static ExecutorService pool; @BeforeClass @@ -99,11 +94,13 @@ public class TestMobFileCompactor { TEST_UTIL.getConfiguration().setLong(MobConstants.MOB_FILE_COMPACTION_MERGEABLE_THRESHOLD, 5000); TEST_UTIL.startMiniCluster(1); pool = createThreadPool(TEST_UTIL.getConfiguration()); + conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration(), pool); } @AfterClass public static void tearDownAfterClass() throws Exception { pool.shutdown(); + conn.close(); TEST_UTIL.shutdownMiniCluster(); } @@ -127,8 +124,8 @@ public class TestMobFileCompactor { desc.addFamily(hcd2); admin = TEST_UTIL.getHBaseAdmin(); admin.createTable(desc, getSplitKeys()); - hTable = new HTable(conf, tableNameAsString); - hTable.setAutoFlush(false, false); + hTable = conn.getTable(tableName); + bufMut = conn.getBufferedMutator(tableName); } @After @@ -160,12 +157,12 @@ public class TestMobFileCompactor { desc.addFamily(hcd1); desc.addFamily(hcd2); admin.createTable(desc, getSplitKeys()); - HTable table = new HTable(conf, tableName); - table.setAutoFlush(false, false); + BufferedMutator bufMut= conn.getBufferedMutator(tableName); + Table table = conn.getTable(tableName); int count = 4; // generate mob files - 
loadData(admin, table, tableName, count, rowNumPerFile); + loadData(admin, bufMut, tableName, count, rowNumPerFile); int rowNumPerRegion = count * rowNumPerFile; assertEquals("Before compaction: mob rows count", regionNum * rowNumPerRegion, @@ -194,7 +191,7 @@ public class TestMobFileCompactor { resetConf(); int count = 4; // generate mob files - loadData(admin, hTable, tableName, count, rowNumPerFile); + loadData(admin, bufMut, tableName, count, rowNumPerFile); int rowNumPerRegion = count*rowNumPerFile; assertEquals("Before compaction: mob rows count", regionNum*rowNumPerRegion, @@ -218,7 +215,7 @@ public class TestMobFileCompactor { resetConf(); int count = 4; // generate mob files - loadData(admin, hTable, tableName, count, rowNumPerFile); + loadData(admin, bufMut, tableName, count, rowNumPerFile); int rowNumPerRegion = count*rowNumPerFile; assertEquals("Before deleting: mob rows count", regionNum*rowNumPerRegion, @@ -273,7 +270,7 @@ public class TestMobFileCompactor { int count = 4; // generate mob files - loadData(admin, hTable, tableName, count, rowNumPerFile); + loadData(admin, bufMut, tableName, count, rowNumPerFile); int rowNumPerRegion = count*rowNumPerFile; assertEquals("Before deleting: mob rows count", regionNum*rowNumPerRegion, @@ -326,7 +323,7 @@ public class TestMobFileCompactor { conf.setInt(MobConstants.MOB_FILE_COMPACTION_BATCH_SIZE, batchSize); int count = 4; // generate mob files - loadData(admin, hTable, tableName, count, rowNumPerFile); + loadData(admin, bufMut, tableName, count, rowNumPerFile); int rowNumPerRegion = count*rowNumPerFile; assertEquals("Before deleting: mob row count", regionNum*rowNumPerRegion, @@ -374,7 +371,7 @@ public class TestMobFileCompactor { resetConf(); int count = 4; // generate mob files - loadData(admin, hTable, tableName, count, rowNumPerFile); + loadData(admin, bufMut, tableName, count, rowNumPerFile); int rowNumPerRegion = count*rowNumPerFile; long tid = System.currentTimeMillis(); @@ -461,7 +458,7 @@ public class TestMobFileCompactor { public void testCompactionFromAdmin() throws Exception { int count = 4; // generate mob files - loadData(admin, hTable, tableName, count, rowNumPerFile); + loadData(admin, bufMut, tableName, count, rowNumPerFile); int rowNumPerRegion = count*rowNumPerFile; assertEquals("Before deleting: mob rows count", regionNum*rowNumPerRegion, @@ -512,7 +509,7 @@ public class TestMobFileCompactor { public void testMajorCompactionFromAdmin() throws Exception { int count = 4; // generate mob files - loadData(admin, hTable, tableName, count, rowNumPerFile); + loadData(admin, bufMut, tableName, count, rowNumPerFile); int rowNumPerRegion = count*rowNumPerFile; assertEquals("Before deleting: mob rows count", regionNum*rowNumPerRegion, @@ -574,7 +571,7 @@ public class TestMobFileCompactor { * @param table to get the scanner * @return the number of rows */ - private int countMobRows(final HTable table) throws IOException { + private int countMobRows(final Table table) throws IOException { Scan scan = new Scan(); // Do not retrieve the mob data when scanning scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE)); @@ -592,7 +589,7 @@ public class TestMobFileCompactor { * @param table to get the scanner * @return the number of cells */ - private int countMobCells(final HTable table) throws IOException { + private int countMobCells(final Table table) throws IOException { Scan scan = new Scan(); // Do not retrieve the mob data when scanning scan.setAttribute(MobConstants.MOB_SCAN_RAW, 
Bytes.toBytes(Boolean.TRUE)); @@ -680,9 +677,8 @@ public class TestMobFileCompactor { /** * loads some data to the table. - * @param count the mob file number */ - private void loadData(Admin admin, HTable table, TableName tableName, int fileNum, + private void loadData(Admin admin, BufferedMutator table, TableName tableName, int fileNum, int rowNumPerFile) throws IOException, InterruptedException { if (fileNum <= 0) { throw new IllegalArgumentException(); @@ -694,12 +690,12 @@ public class TestMobFileCompactor { byte[] mobVal = makeDummyData(10 * (i + 1)); Put put = new Put(key); put.setDurability(Durability.SKIP_WAL); - put.add(Bytes.toBytes(family1), Bytes.toBytes(qf1), mobVal); - put.add(Bytes.toBytes(family1), Bytes.toBytes(qf2), mobVal); - put.add(Bytes.toBytes(family2), Bytes.toBytes(qf1), mobVal); - table.put(put); + put.addColumn(Bytes.toBytes(family1), Bytes.toBytes(qf1), mobVal); + put.addColumn(Bytes.toBytes(family1), Bytes.toBytes(qf2), mobVal); + put.addColumn(Bytes.toBytes(family2), Bytes.toBytes(qf1), mobVal); + table.mutate(put); if ((i + 1) % rowNumPerFile == 0) { - table.flushCommits(); + table.flush(); admin.flush(tableName); } } @@ -715,7 +711,7 @@ public class TestMobFileCompactor { // delete a family byte[] key1 = Bytes.add(k, Bytes.toBytes(0)); Delete delete1 = new Delete(key1); - delete1.deleteFamily(Bytes.toBytes(family1)); + delete1.addFamily(Bytes.toBytes(family1)); hTable.delete(delete1); // delete one row byte[] key2 = Bytes.add(k, Bytes.toBytes(2)); @@ -724,21 +720,20 @@ public class TestMobFileCompactor { // delete one cell byte[] key3 = Bytes.add(k, Bytes.toBytes(4)); Delete delete3 = new Delete(key3); - delete3.deleteColumn(Bytes.toBytes(family1), Bytes.toBytes(qf1)); + delete3.addColumn(Bytes.toBytes(family1), Bytes.toBytes(qf1)); hTable.delete(delete3); - hTable.flushCommits(); admin.flush(tableName); List regions = TEST_UTIL.getHBaseCluster().getRegions( Bytes.toBytes(tableNameAsString)); for (HRegion region : regions) { region.waitForFlushesAndCompactions(); - region.compactStores(true); + region.compact(true); } } } /** * Creates the dummy data with a specific size. 
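The loadData and delete hunks above illustrate the 1.0-era client API: buffered writes move from HTable.setAutoFlush(false, false) / put() / flushCommits() to BufferedMutator.mutate() / flush(), and the deprecated Put.add, Delete.deleteFamily and Delete.deleteColumn calls become addColumn and addFamily. A condensed sketch of that write path follows; the table name, family, and class name are placeholders, not taken from the patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.BufferedMutator;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BufferedWriteSketch {  // hypothetical helper, not part of the patch
      static void writeAndDelete(Configuration conf) throws Exception {
        TableName tn = TableName.valueOf("demo");  // placeholder table
        byte[] fam = Bytes.toBytes("f");           // placeholder family
        try (Connection conn = ConnectionFactory.createConnection(conf);
             BufferedMutator mutator = conn.getBufferedMutator(tn);
             Table table = conn.getTable(tn)) {
          Put put = new Put(Bytes.toBytes("row1"));
          put.addColumn(fam, Bytes.toBytes("q1"), Bytes.toBytes("v1"));  // was put.add(...)
          mutator.mutate(put);   // was table.put(put) on an HTable with autoflush off
          mutator.flush();       // was table.flushCommits()

          Delete delete = new Delete(Bytes.toBytes("row1"));
          delete.addFamily(fam);                          // was deleteFamily(...)
          delete.addColumn(fam, Bytes.toBytes("q1"));     // was deleteColumn(...)
          table.delete(delete);  // Table still handles point deletes, gets and scans
        }
      }
    }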
- * @param the size of data + * @param size the size of value * @return the dummy data */ private byte[] makeDummyData(int size) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/filecompactions/TestPartitionedMobFileCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/filecompactions/TestPartitionedMobFileCompactor.java index 3c73d526c4e..ed3853ed61d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/filecompactions/TestPartitionedMobFileCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/filecompactions/TestPartitionedMobFileCompactor.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.Type; +import org.apache.hadoop.hbase.regionserver.*; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Scan; @@ -54,13 +55,6 @@ import org.apache.hadoop.hbase.mob.MobFileName; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.mob.filecompactions.MobFileCompactionRequest.CompactionType; import org.apache.hadoop.hbase.mob.filecompactions.PartitionedMobFileCompactionRequest.CompactionPartition; -import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HStore; -import org.apache.hadoop.hbase.regionserver.ScanInfo; -import org.apache.hadoop.hbase.regionserver.ScanType; -import org.apache.hadoop.hbase.regionserver.StoreFile; -import org.apache.hadoop.hbase.regionserver.StoreFileScanner; -import org.apache.hadoop.hbase.regionserver.StoreScanner; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Threads; @@ -93,7 +87,7 @@ public class TestPartitionedMobFileCompactor { TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.info.port.auto", true); TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); TEST_UTIL.startMiniCluster(1); - pool = createThreadPool(TEST_UTIL.getConfiguration()); + pool = createThreadPool(); } @AfterClass @@ -403,6 +397,7 @@ public class TestPartitionedMobFileCompactor { scanners, 0L, HConstants.LATEST_TIMESTAMP); List results = new ArrayList<>(); boolean hasMore = true; + while (hasMore) { hasMore = scanner.next(results); size += results.size(); @@ -412,7 +407,7 @@ public class TestPartitionedMobFileCompactor { return size; } - private static ExecutorService createThreadPool(Configuration conf) { + private static ExecutorService createThreadPool() { int maxThreads = 10; long keepAliveTime = 60; final SynchronousQueue queue = new SynchronousQueue(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepJob.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepJob.java index 49345e4ff19..3023849c858 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepJob.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepJob.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.mob.mapreduce; +import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import java.io.IOException; @@ -132,7 +133,7 @@ public class TestMobSweepJob { List toBeArchived = sweepJob.getUnusedFiles(configuration); assertEquals(2, toBeArchived.size()); - assertEquals(new String[] { "4", "6" }, 
toBeArchived.toArray(new String[0])); + assertArrayEquals(new String[]{"4", "6"}, toBeArchived.toArray(new String[0])); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepReducer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepReducer.java index 308b50e352b..8c24123ceb0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepReducer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweepReducer.java @@ -37,9 +37,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.*; import org.apache.hadoop.hbase.mapreduce.TableInputFormat; import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.TableLockManager.TableLock; @@ -74,7 +72,7 @@ public class TestMobSweepReducer { private final static String row = "row"; private final static String family = "family"; private final static String qf = "qf"; - private static HTable table; + private static BufferedMutator table; private static Admin admin; @BeforeClass @@ -104,7 +102,8 @@ public class TestMobSweepReducer { admin = TEST_UTIL.getHBaseAdmin(); admin.createTable(desc); - table = new HTable(TEST_UTIL.getConfiguration(), tableName); + table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()) + .getBufferedMutator(TableName.valueOf(tableName)); } @After @@ -138,12 +137,12 @@ public class TestMobSweepReducer { Path mobFamilyPath = MobUtils.getMobFamilyPath(TEST_UTIL.getConfiguration(), tn, family); Put put = new Put(Bytes.toBytes(row)); - put.add(Bytes.toBytes(family), Bytes.toBytes(qf), 1, mobValueBytes); + put.addColumn(Bytes.toBytes(family), Bytes.toBytes(qf), 1, mobValueBytes); Put put2 = new Put(Bytes.toBytes(row + "ignore")); - put2.add(Bytes.toBytes(family), Bytes.toBytes(qf), 1, mobValueBytes); - table.put(put); - table.put(put2); - table.flushCommits(); + put2.addColumn(Bytes.toBytes(family), Bytes.toBytes(qf), 1, mobValueBytes); + table.mutate(put); + table.mutate(put2); + table.flush(); admin.flush(tn); FileStatus[] fileStatuses = TEST_UTIL.getTestFileSystem().listStatus(mobFamilyPath); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweeper.java index 8a017a2adf3..31778ae1cd6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweeper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/mapreduce/TestMobSweeper.java @@ -32,12 +32,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.*; import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.mob.MobUtils; import 
org.apache.hadoop.hbase.testclassification.MediumTests; @@ -57,7 +52,8 @@ public class TestMobSweeper { private final static String row = "row_"; private final static String family = "family"; private final static String column = "column"; - private static HTable table; + private static Table table; + private static BufferedMutator bufMut; private static Admin admin; private Random random = new Random(); @@ -94,9 +90,10 @@ public class TestMobSweeper { admin = TEST_UTIL.getHBaseAdmin(); admin.createTable(desc); - table = new HTable(TEST_UTIL.getConfiguration(), tableName); - table.setAutoFlush(false, false); - + Connection c = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + TableName tn = TableName.valueOf(tableName); + table = c.getTable(tn); + bufMut = c.getBufferedMutator(tn); } @After @@ -120,7 +117,7 @@ public class TestMobSweeper { return sb.toString(); } - private void generateMobTable(Admin admin, HTable table, String tableName, int count, + private void generateMobTable(Admin admin, BufferedMutator table, String tableName, int count, int flushStep) throws IOException, InterruptedException { if (count <= 0 || flushStep <= 0) return; @@ -130,14 +127,14 @@ public class TestMobSweeper { random.nextBytes(mobVal); Put put = new Put(Bytes.toBytes(row + i)); - put.add(Bytes.toBytes(family), Bytes.toBytes(column), mobVal); - table.put(put); + put.addColumn(Bytes.toBytes(family), Bytes.toBytes(column), mobVal); + table.mutate(put); if (index++ % flushStep == 0) { - table.flushCommits(); + table.flush(); admin.flush(TableName.valueOf(tableName)); } } - table.flushCommits(); + table.flush(); admin.flush(TableName.valueOf(tableName)); } @@ -145,11 +142,11 @@ public class TestMobSweeper { public void testSweeper() throws Exception { int count = 10; //create table and generate 10 mob files - generateMobTable(admin, table, tableName, count, 1); + generateMobTable(admin, bufMut, tableName, count, 1); //get mob files Path mobFamilyPath = getMobFamilyPath(TEST_UTIL.getConfiguration(), tableName, family); FileStatus[] fileStatuses = TEST_UTIL.getTestFileSystem().listStatus(mobFamilyPath); - // mobFileSet0 stores the orignal mob files + // mobFileSet0 stores the original mob files TreeSet mobFilesSet = new TreeSet(); for (FileStatus status : fileStatuses) { mobFilesSet.add(status.getPath().getName()); @@ -211,11 +208,11 @@ public class TestMobSweeper { .equalsIgnoreCase(mobFilesSet.iterator().next())); } - private void testCompactionDelaySweeperInternal(HTable table, String tableName) + private void testCompactionDelaySweeperInternal(Table table, BufferedMutator bufMut, String tableName) throws Exception { int count = 10; //create table and generate 10 mob files - generateMobTable(admin, table, tableName, count, 1); + generateMobTable(admin, bufMut, tableName, count, 1); //get mob files Path mobFamilyPath = getMobFamilyPath(TEST_UTIL.getConfiguration(), tableName, family); FileStatus[] fileStatuses = TEST_UTIL.getTestFileSystem().listStatus(mobFamilyPath); @@ -282,7 +279,7 @@ public class TestMobSweeper { @Test public void testCompactionDelaySweeper() throws Exception { - testCompactionDelaySweeperInternal(table, tableName); + testCompactionDelaySweeperInternal(table, bufMut, tableName); } @Test @@ -299,9 +296,10 @@ public class TestMobSweeper { hcd.setMaxVersions(4); desc.addFamily(hcd); admin.createTable(desc); - HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName); - table.setAutoFlush(false, false); - testCompactionDelaySweeperInternal(table, tableNameAsString); + 
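In the TestMobSweeper hunks above, helpers such as generateMobTable and testCompactionDelaySweeperInternal now take both a Table and a BufferedMutator because the removed HTable used to cover both roles: the Table handles scans and gets, the BufferedMutator handles buffered writes. A short sketch of that split, with invented row, family and qualifier values:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.BufferedMutator;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    final class ReadWriteSplitSketch {  // hypothetical helper, not part of the patch
      static int writeThenCount(Connection conn, TableName tn) throws Exception {
        byte[] fam = Bytes.toBytes("family");  // placeholder family
        try (BufferedMutator mutator = conn.getBufferedMutator(tn)) {  // write side
          Put put = new Put(Bytes.toBytes("row_0"));
          put.addColumn(fam, Bytes.toBytes("column"), Bytes.toBytes("value"));
          mutator.mutate(put);
          mutator.flush();
        }
        int rows = 0;
        try (Table table = conn.getTable(tn);                          // read side
             ResultScanner scanner = table.getScanner(new Scan())) {
          for (Result unused : scanner) {
            rows++;
          }
        }
        return rows;
      }
    }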
Connection c = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + BufferedMutator bufMut = c.getBufferedMutator(tableName); + Table table = c.getTable(tableName); + testCompactionDelaySweeperInternal(table, bufMut, tableNameAsString); table.close(); admin.disableTable(tableName); admin.deleteTable(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java index f919078bd02..c86e0ffbbc6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namespace/TestNamespaceAuditor.java @@ -25,7 +25,9 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.util.Collections; import java.util.List; +import java.util.Set; import java.util.concurrent.CountDownLatch; import org.apache.commons.lang.StringUtils; @@ -50,19 +52,27 @@ import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver; import org.apache.hadoop.hbase.coprocessor.BaseRegionServerObserver; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionServerObserver; -import org.apache.hadoop.hbase.coprocessor.TestRegionServerObserver.CPRegionServerObserver; import org.apache.hadoop.hbase.mapreduce.TableInputFormatBase; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.RegionStates; import org.apache.hadoop.hbase.master.TableNamespaceManager; +import org.apache.hadoop.hbase.quotas.MasterQuotaManager; +import org.apache.hadoop.hbase.quotas.QuotaExceededException; import org.apache.hadoop.hbase.quotas.QuotaUtil; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -74,29 +84,28 @@ import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; +import com.google.common.collect.Sets; + @Category(MediumTests.class) public class TestNamespaceAuditor { private static final Log LOG = LogFactory.getLog(TestNamespaceAuditor.class); private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); - private static HBaseAdmin admin; + private static HBaseAdmin ADMIN; private String prefix = "TestNamespaceAuditor"; @BeforeClass public static void before() throws Exception { UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, CustomObserver.class.getName()); + UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + 
MasterSyncObserver.class.getName()); Configuration conf = UTIL.getConfiguration(); conf.setBoolean(QuotaUtil.QUOTA_CONF_KEY, true); conf.setClass("hbase.coprocessor.regionserver.classes", CPRegionServerObserver.class, RegionServerObserver.class); - UTIL.startMiniCluster(1, 3); - UTIL.waitFor(60000, new Waiter.Predicate() { - @Override - public boolean evaluate() throws Exception { - return UTIL.getHBaseCluster().getMaster().getMasterQuotaManager().isQuotaEnabled(); - } - }); - admin = UTIL.getHBaseAdmin(); + UTIL.startMiniCluster(1, 1); + waitForQuotaEnabled(); + ADMIN = UTIL.getHBaseAdmin(); } @AfterClass @@ -105,14 +114,14 @@ public class TestNamespaceAuditor { } @After - public void cleanup() throws IOException, KeeperException { - for (HTableDescriptor table : admin.listTables()) { - admin.disableTable(table.getTableName()); - admin.deleteTable(table.getTableName()); + public void cleanup() throws Exception, KeeperException { + for (HTableDescriptor table : ADMIN.listTables()) { + ADMIN.disableTable(table.getTableName()); + deleteTable(table.getTableName()); } - for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) { + for (NamespaceDescriptor ns : ADMIN.listNamespaceDescriptors()) { if (ns.getName().startsWith(prefix)) { - admin.deleteNamespace(ns.getName()); + ADMIN.deleteNamespace(ns.getName()); } } assertTrue("Quota manager not enabled", UTIL.getHBaseCluster().getMaster() @@ -125,19 +134,19 @@ public class TestNamespaceAuditor { NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5") .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build(); - admin.createNamespace(nspDesc); - assertNotNull("Namespace descriptor found null.", admin.getNamespaceDescriptor(nsp)); - assertEquals(admin.listNamespaceDescriptors().length, 3); + ADMIN.createNamespace(nspDesc); + assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp)); + assertEquals(ADMIN.listNamespaceDescriptors().length, 3); HTableDescriptor tableDescOne = new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1")); HTableDescriptor tableDescTwo = new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2")); HTableDescriptor tableDescThree = new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table3")); - admin.createTable(tableDescOne); + ADMIN.createTable(tableDescOne); boolean constraintViolated = false; try { - admin.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 5); + ADMIN.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 5); } catch (Exception exp) { assertTrue(exp instanceof IOException); constraintViolated = true; @@ -145,14 +154,14 @@ public class TestNamespaceAuditor { assertTrue("Constraint not violated for table " + tableDescTwo.getTableName(), constraintViolated); } - admin.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4); + ADMIN.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4); NamespaceTableAndRegionInfo nspState = getQuotaManager().getState(nsp); assertNotNull(nspState); assertTrue(nspState.getTables().size() == 2); assertTrue(nspState.getRegionCount() == 5); constraintViolated = false; try { - admin.createTable(tableDescThree); + ADMIN.createTable(tableDescThree); } catch (Exception exp) { assertTrue(exp instanceof IOException); constraintViolated = true; @@ -172,7 +181,7 @@ public class TestNamespaceAuditor { 
.addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "hihdufh") .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build(); try { - admin.createNamespace(nspDesc); + ADMIN.createNamespace(nspDesc); } catch (Exception exp) { LOG.warn(exp); exceptionCaught = true; @@ -185,7 +194,7 @@ public class TestNamespaceAuditor { .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "-456") .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build(); try { - admin.createNamespace(nspDesc); + ADMIN.createNamespace(nspDesc); } catch (Exception exp) { LOG.warn(exp); exceptionCaught = true; @@ -198,7 +207,7 @@ public class TestNamespaceAuditor { .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "10") .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "sciigd").build(); try { - admin.createNamespace(nspDesc); + ADMIN.createNamespace(nspDesc); } catch (Exception exp) { LOG.warn(exp); exceptionCaught = true; @@ -211,7 +220,7 @@ public class TestNamespaceAuditor { .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "10") .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "-1500").build(); try { - admin.createNamespace(nspDesc); + ADMIN.createNamespace(nspDesc); } catch (Exception exp) { LOG.warn(exp); exceptionCaught = true; @@ -228,44 +237,54 @@ public class TestNamespaceAuditor { NamespaceDescriptor.create(namespace) .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "100") .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "3").build(); - admin.createNamespace(nspDesc); - assertNotNull("Namespace descriptor found null.", admin.getNamespaceDescriptor(namespace)); + ADMIN.createNamespace(nspDesc); + assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(namespace)); NamespaceTableAndRegionInfo stateInfo = getNamespaceState(nspDesc.getName()); assertNotNull("Namespace state found null for " + namespace, stateInfo); HTableDescriptor tableDescOne = new HTableDescriptor(TableName.valueOf(namespace + TableName.NAMESPACE_DELIM + "table1")); HTableDescriptor tableDescTwo = new HTableDescriptor(TableName.valueOf(namespace + TableName.NAMESPACE_DELIM + "table2")); - admin.createTable(tableDescOne); - admin.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 5); + ADMIN.createTable(tableDescOne); + ADMIN.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 5); stateInfo = getNamespaceState(nspDesc.getName()); assertNotNull("Namespace state found to be null.", stateInfo); assertEquals(2, stateInfo.getTables().size()); assertEquals(5, stateInfo.getRegionCountOfTable(tableDescTwo.getTableName())); assertEquals(6, stateInfo.getRegionCount()); - admin.disableTable(tableDescOne.getTableName()); - admin.deleteTable(tableDescOne.getTableName()); + ADMIN.disableTable(tableDescOne.getTableName()); + deleteTable(tableDescOne.getTableName()); stateInfo = getNamespaceState(nspDesc.getName()); assertNotNull("Namespace state found to be null.", stateInfo); assertEquals(5, stateInfo.getRegionCount()); assertEquals(1, stateInfo.getTables().size()); - admin.disableTable(tableDescTwo.getTableName()); - admin.deleteTable(tableDescTwo.getTableName()); - admin.deleteNamespace(namespace); + ADMIN.disableTable(tableDescTwo.getTableName()); + deleteTable(tableDescTwo.getTableName()); + ADMIN.deleteNamespace(namespace); stateInfo = getNamespaceState(namespace); assertNull("Namespace state not found to be null.", stateInfo); } public static class CPRegionServerObserver extends BaseRegionServerObserver { - private boolean 
shouldFailMerge = false; + private volatile boolean shouldFailMerge = false; public void failMerge(boolean fail) { shouldFailMerge = fail; } + private boolean triggered = false; + + public synchronized void waitUtilTriggered() throws InterruptedException { + while (!triggered) { + wait(); + } + } + @Override - public void preMerge(ObserverContext ctx, HRegion regionA, - HRegion regionB) throws IOException { + public synchronized void preMerge(ObserverContext ctx, + Region regionA, Region regionB) throws IOException { + triggered = true; + notifyAll(); if (shouldFailMerge) { throw new IOException("fail merge"); } @@ -275,64 +294,126 @@ public class TestNamespaceAuditor { @Test public void testRegionMerge() throws Exception { String nsp1 = prefix + "_regiontest"; - NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1) - .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "3") - .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build(); - admin.createNamespace(nspDesc); + NamespaceDescriptor nspDesc = + NamespaceDescriptor.create(nsp1) + .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "3") + .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build(); + ADMIN.createNamespace(nspDesc); final TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2"); byte[] columnFamily = Bytes.toBytes("info"); HTableDescriptor tableDescOne = new HTableDescriptor(tableTwo); tableDescOne.addFamily(new HColumnDescriptor(columnFamily)); - NamespaceTableAndRegionInfo stateInfo; final int initialRegions = 3; - admin.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("2000"), initialRegions); + ADMIN.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("2000"), initialRegions); Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); - HTable htable = (HTable)connection.getTable(tableTwo); - UTIL.loadNumericRows(htable, Bytes.toBytes("info"), 1, 1000); - admin.flush(tableTwo); - stateInfo = getNamespaceState(nsp1); - List hris = admin.getTableRegions(tableTwo); + try (Table table = connection.getTable(tableTwo)) { + UTIL.loadNumericRows(table, Bytes.toBytes("info"), 1000, 1999); + } + ADMIN.flush(tableTwo); + List hris = ADMIN.getTableRegions(tableTwo); + Collections.sort(hris); // merge the two regions - admin.mergeRegions(hris.get(0).getEncodedNameAsBytes(), - hris.get(1).getEncodedNameAsBytes(), false); - while (admin.getTableRegions(tableTwo).size() == initialRegions) { - Thread.sleep(100); - } - hris = admin.getTableRegions(tableTwo); - assertEquals(initialRegions-1, hris.size()); + final Set encodedRegionNamesToMerge = + Sets.newHashSet(hris.get(0).getEncodedName(), hris.get(1).getEncodedName()); + ADMIN.mergeRegions(hris.get(0).getEncodedNameAsBytes(), hris.get(1).getEncodedNameAsBytes(), + false); + UTIL.waitFor(10000, 100, new Waiter.ExplainingPredicate() { - HRegion actualRegion = UTIL.getHBaseCluster().getRegions(tableTwo).get(0); - byte[] splitKey = getSplitKey(actualRegion.getStartKey(), actualRegion.getEndKey()); - admin.split(tableTwo, Bytes.toBytes("500")); - while (admin.getTableRegions(tableTwo).size() != initialRegions) { - Thread.sleep(100); - } - assertEquals(initialRegions, admin.getTableRegions(tableTwo).size()); + @Override + public boolean evaluate() throws Exception { + RegionStates regionStates = + UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); + for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) { + if 
(encodedRegionNamesToMerge.contains(hri.getEncodedName())) { + return false; + } + if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) { + return false; + } + } + return true; + } + + @Override + public String explainFailure() throws Exception { + RegionStates regionStates = + UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); + for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) { + if (encodedRegionNamesToMerge.contains(hri.getEncodedName())) { + return hri + " which is expected to be merged is still online"; + } + if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) { + return hri + " is still in not opened"; + } + } + return "Unknown"; + } + }); + hris = ADMIN.getTableRegions(tableTwo); + assertEquals(initialRegions - 1, hris.size()); + Collections.sort(hris); + + final HRegionInfo hriToSplit = hris.get(1); + ADMIN.split(tableTwo, Bytes.toBytes("500")); + + UTIL.waitFor(10000, 100, new Waiter.ExplainingPredicate() { + + @Override + public boolean evaluate() throws Exception { + RegionStates regionStates = + UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); + for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) { + if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) { + return false; + } + if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) { + return false; + } + } + return true; + } + + @Override + public String explainFailure() throws Exception { + RegionStates regionStates = + UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().getRegionStates(); + for (HRegionInfo hri : ADMIN.getTableRegions(tableTwo)) { + if (hri.getEncodedName().equals(hriToSplit.getEncodedName())) { + return hriToSplit + " which is expected to be split is still online"; + } + if (!regionStates.isRegionInState(hri, RegionState.State.OPEN)) { + return hri + " is still in not opened"; + } + } + return "Unknown"; + } + }); + hris = ADMIN.getTableRegions(tableTwo); + assertEquals(initialRegions, hris.size()); + Collections.sort(hris); // fail region merge through Coprocessor hook MiniHBaseCluster cluster = UTIL.getHBaseCluster(); - hris = admin.getTableRegions(tableTwo); - HRegionServer regionServer = cluster.getRegionServer( - cluster.getServerWith(hris.get(0).getRegionName())); + HRegionServer regionServer = cluster.getRegionServer(0); RegionServerCoprocessorHost cpHost = regionServer.getRegionServerCoprocessorHost(); Coprocessor coprocessor = cpHost.findCoprocessor(CPRegionServerObserver.class.getName()); CPRegionServerObserver regionServerObserver = (CPRegionServerObserver) coprocessor; regionServerObserver.failMerge(true); - admin.mergeRegions(hris.get(0).getEncodedNameAsBytes(), - hris.get(1).getEncodedNameAsBytes(), false); - assertEquals(initialRegions, admin.getTableRegions(tableTwo).size()); - // verify that we cannot split - actualRegion = UTIL.getHBaseCluster().getRegions(tableTwo).get(0); - admin.split(tableTwo, TableInputFormatBase.getSplitKey(actualRegion.getStartKey(), - actualRegion.getEndKey(), true)); - while (admin.getTableRegions(tableTwo).size() != initialRegions) { - Thread.sleep(100); - } - assertEquals(initialRegions, admin.getTableRegions(tableTwo).size()); - regionServerObserver.failMerge(true); + regionServerObserver.triggered = false; - htable.close(); - } + ADMIN.mergeRegions(hris.get(1).getEncodedNameAsBytes(), hris.get(2).getEncodedNameAsBytes(), + false); + regionServerObserver.waitUtilTriggered(); + hris = ADMIN.getTableRegions(tableTwo); + 
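The merge and split waits above replace hand-rolled "while (...) Thread.sleep(100)" loops with UTIL.waitFor and a Waiter.ExplainingPredicate, which polls until the condition holds and, on timeout, reports explainFailure() in the error message. A trimmed sketch of the idiom, using a hypothetical helper that waits for a region count:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.Waiter;

    final class WaitForSketch {  // hypothetical helper, not part of the patch
      // Poll every 100 ms for up to 10 s; on timeout, explainFailure() ends up in the failure text.
      static void waitForRegionCount(final HBaseTestingUtility util, final TableName table,
          final int expected) throws Exception {
        util.waitFor(10000, 100, new Waiter.ExplainingPredicate<Exception>() {
          @Override
          public boolean evaluate() throws Exception {
            return util.getHBaseAdmin().getTableRegions(table).size() == expected;
          }

          @Override
          public String explainFailure() throws Exception {
            return table + " still has " + util.getHBaseAdmin().getTableRegions(table).size()
                + " regions, expected " + expected;
          }
        });
      }
    }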
assertEquals(initialRegions, hris.size()); + Collections.sort(hris); + // verify that we cannot split + HRegionInfo hriToSplit2 = hris.get(1); + ADMIN.split(tableTwo, + TableInputFormatBase.getSplitKey(hriToSplit2.getStartKey(), hriToSplit2.getEndKey(), true)); + Thread.sleep(2000); + assertEquals(initialRegions, ADMIN.getTableRegions(tableTwo).size()); + } @Test public void testRegionOperations() throws Exception { @@ -340,7 +421,7 @@ public class TestNamespaceAuditor { NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1) .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "2") .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build(); - admin.createNamespace(nspDesc); + ADMIN.createNamespace(nspDesc); boolean constraintViolated = false; final TableName tableOne = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table1"); byte[] columnFamily = Bytes.toBytes("info"); @@ -348,7 +429,7 @@ public class TestNamespaceAuditor { tableDescOne.addFamily(new HColumnDescriptor(columnFamily)); NamespaceTableAndRegionInfo stateInfo; try { - admin.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("1000"), 7); + ADMIN.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("1000"), 7); } catch (Exception exp) { assertTrue(exp instanceof DoNotRetryIOException); LOG.info(exp); @@ -356,32 +437,33 @@ public class TestNamespaceAuditor { } finally { assertTrue(constraintViolated); } - assertFalse(admin.tableExists(tableOne)); + assertFalse(ADMIN.tableExists(tableOne)); // This call will pass. - admin.createTable(tableDescOne); + ADMIN.createTable(tableDescOne); Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration()); HTable htable = (HTable)connection.getTable(tableOne); UTIL.loadNumericRows(htable, Bytes.toBytes("info"), 1, 1000); - admin.flush(tableOne); + ADMIN.flush(tableOne); stateInfo = getNamespaceState(nsp1); assertEquals(1, stateInfo.getTables().size()); assertEquals(1, stateInfo.getRegionCount()); restartMaster(); - admin.split(tableOne, Bytes.toBytes("500")); + ADMIN.split(tableOne, Bytes.toBytes("500")); HRegion actualRegion = UTIL.getHBaseCluster().getRegions(tableOne).get(0); CustomObserver observer = (CustomObserver) actualRegion.getCoprocessorHost().findCoprocessor( CustomObserver.class.getName()); assertNotNull(observer); observer.postSplit.await(); - assertEquals(2, admin.getTableRegions(tableOne).size()); + assertEquals(2, ADMIN.getTableRegions(tableOne).size()); actualRegion = UTIL.getHBaseCluster().getRegions(tableOne).get(0); observer = (CustomObserver) actualRegion.getCoprocessorHost().findCoprocessor( CustomObserver.class.getName()); assertNotNull(observer); - admin.split(tableOne, getSplitKey(actualRegion.getStartKey(), actualRegion.getEndKey())); + ADMIN.split(tableOne, getSplitKey(actualRegion.getRegionInfo().getStartKey(), + actualRegion.getRegionInfo().getEndKey())); observer.postSplit.await(); // Make sure no regions have been added. 
- List hris = admin.getTableRegions(tableOne); + List hris = ADMIN.getTableRegions(tableOne); assertEquals(2, hris.size()); assertTrue("split completed", observer.preSplitBeforePONR.getCount() == 1); @@ -433,18 +515,18 @@ public class TestNamespaceAuditor { NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1) .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "20") .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "10").build(); - admin.createNamespace(nspDesc); + ADMIN.createNamespace(nspDesc); TableName tableOne = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table1"); TableName tableTwo = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table2"); TableName tableThree = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table3"); HTableDescriptor tableDescOne = new HTableDescriptor(tableOne); HTableDescriptor tableDescTwo = new HTableDescriptor(tableTwo); HTableDescriptor tableDescThree = new HTableDescriptor(tableThree); - admin.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("1000"), 3); - admin.createTable(tableDescTwo, Bytes.toBytes("1"), Bytes.toBytes("1000"), 3); - admin.createTable(tableDescThree, Bytes.toBytes("1"), Bytes.toBytes("1000"), 4); - admin.disableTable(tableThree); - admin.deleteTable(tableThree); + ADMIN.createTable(tableDescOne, Bytes.toBytes("1"), Bytes.toBytes("1000"), 3); + ADMIN.createTable(tableDescTwo, Bytes.toBytes("1"), Bytes.toBytes("1000"), 3); + ADMIN.createTable(tableDescThree, Bytes.toBytes("1"), Bytes.toBytes("1000"), 4); + ADMIN.disableTable(tableThree); + deleteTable(tableThree); // wait for chore to complete UTIL.waitFor(1000, new Waiter.Predicate() { @Override @@ -459,21 +541,72 @@ public class TestNamespaceAuditor { .getTables().size(), after.getTables().size()); } - private void restartMaster() throws Exception { - UTIL.getHBaseCluster().getMaster().stop("Stopping to start again"); - UTIL.getHBaseCluster().startMaster(); - Thread.sleep(60000); + private static void waitForQuotaEnabled() throws Exception { UTIL.waitFor(60000, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { - return UTIL.getHBaseCluster().getMaster().getMasterQuotaManager().isQuotaEnabled(); + HMaster master = UTIL.getHBaseCluster().getMaster(); + if (master == null) { + return false; + } + MasterQuotaManager quotaManager = master.getMasterQuotaManager(); + return quotaManager != null && quotaManager.isQuotaEnabled(); } }); } + private void restartMaster() throws Exception { + UTIL.getHBaseCluster().getMaster(0).stop("Stopping to start again"); + UTIL.getHBaseCluster().waitOnMaster(0); + UTIL.getHBaseCluster().startMaster(); + waitForQuotaEnabled(); + } + private NamespaceAuditor getQuotaManager() { return UTIL.getHBaseCluster().getMaster() .getMasterQuotaManager().getNamespaceQuotaManager(); } + public static class MasterSyncObserver extends BaseMasterObserver { + volatile CountDownLatch tableDeletionLatch; + + @Override + public void preDeleteTable(ObserverContext ctx, + TableName tableName) throws IOException { + tableDeletionLatch = new CountDownLatch(1); + } + + @Override + public void postDeleteTableHandler( + final ObserverContext ctx, TableName tableName) + throws IOException { + tableDeletionLatch.countDown(); + } + } + + private void deleteTable(final TableName tableName) throws Exception { + // NOTE: We need a latch because admin is not sync, + // so the postOp coprocessor method may be called after the admin operation returned. 
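As the NOTE above explains (the deleteTable helper body continues directly below), Admin.deleteTable can return before the master-side handler finishes, so the patch registers a MasterSyncObserver whose CountDownLatch is armed in preDeleteTable and released in postDeleteTableHandler. The sketch below restates that observer with generics restored and a hypothetical class name, purely for readability:

    import java.io.IOException;
    import java.util.concurrent.CountDownLatch;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;

    // Latch-based observer: armed before the delete starts, released once the
    // master-side delete handler has actually finished.
    public class DeletionLatchObserver extends BaseMasterObserver {  // hypothetical name
      volatile CountDownLatch tableDeletionLatch;

      @Override
      public void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
          TableName tableName) throws IOException {
        tableDeletionLatch = new CountDownLatch(1);
      }

      @Override
      public void postDeleteTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
          TableName tableName) throws IOException {
        tableDeletionLatch.countDown();
      }
    }
    // In the test: ADMIN.deleteTable(tableName); observer.tableDeletionLatch.await();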
+ MasterSyncObserver observer = (MasterSyncObserver)UTIL.getHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName()); + ADMIN.deleteTable(tableName); + observer.tableDeletionLatch.await(); + } + + @Test(expected = QuotaExceededException.class, timeout = 30000) + public void testExceedTableQuotaInNamespace() throws Exception { + String nsp = prefix + "_testExceedTableQuotaInNamespace"; + NamespaceDescriptor nspDesc = + NamespaceDescriptor.create(nsp).addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "1") + .build(); + ADMIN.createNamespace(nspDesc); + assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp)); + assertEquals(ADMIN.listNamespaceDescriptors().length, 3); + HTableDescriptor tableDescOne = + new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1")); + HTableDescriptor tableDescTwo = + new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2")); + ADMIN.createTable(tableDescOne); + ADMIN.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java index 0901d2f3f93..6da18d9eedf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; -import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; +import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.security.UserGroupInformation; @@ -51,10 +51,11 @@ import static org.junit.Assert.assertEquals; @Category({RegionServerTests.class, MediumTests.class}) public class TestQuotaThrottle { - final Log LOG = LogFactory.getLog(getClass()); + private final static Log LOG = LogFactory.getLog(TestQuotaThrottle.class); + + private final static int REFRESH_TIME = 30 * 60000; private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private final static byte[] FAMILY = Bytes.toBytes("cf"); private final static byte[] QUALIFIER = Bytes.toBytes("q"); @@ -64,11 +65,13 @@ public class TestQuotaThrottle { TableName.valueOf("TestQuotaAdmin2") }; + private static ManualEnvironmentEdge envEdge; private static HTable[] tables; @BeforeClass public static void setUpBeforeClass() throws Exception { TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true); + TEST_UTIL.getConfiguration().setInt(QuotaCache.REFRESH_CONF_KEY, REFRESH_TIME); TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10); TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250); @@ -82,10 +85,15 @@ public class TestQuotaThrottle { for (int i = 0; i < TABLE_NAMES.length; ++i) { tables[i] = TEST_UTIL.createTable(TABLE_NAMES[i], FAMILY); } + + envEdge = new ManualEnvironmentEdge(); + envEdge.setValue(EnvironmentEdgeManager.currentTime()); + EnvironmentEdgeManagerTestHelper.injectEdge(envEdge); } @AfterClass public static void tearDownAfterClass() 
throws Exception { + EnvironmentEdgeManager.reset(); for (int i = 0; i < tables.length; ++i) { if (tables[i] != null) { tables[i].close(); @@ -375,12 +383,12 @@ public class TestQuotaThrottle { private void triggerCacheRefresh(boolean bypass, boolean userLimiter, boolean tableLimiter, boolean nsLimiter, final TableName... tables) throws Exception { + envEdge.incValue(2 * REFRESH_TIME); for (RegionServerThread rst: TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads()) { RegionServerQuotaManager quotaManager = rst.getRegionServer().getRegionServerQuotaManager(); QuotaCache quotaCache = quotaManager.getQuotaCache(); quotaCache.triggerCacheRefresh(); - Thread.sleep(250); for (TableName table: tables) { quotaCache.getTableLimiter(table); @@ -388,6 +396,7 @@ public class TestQuotaThrottle { boolean isUpdated = false; while (!isUpdated) { + quotaCache.triggerCacheRefresh(); isUpdated = true; for (TableName table: tables) { boolean isBypass = true; @@ -401,8 +410,8 @@ public class TestQuotaThrottle { isBypass &= quotaCache.getNamespaceLimiter(table.getNamespaceAsString()).isBypass(); } if (isBypass != bypass) { + envEdge.incValue(100); isUpdated = false; - Thread.sleep(250); break; } } @@ -416,8 +425,6 @@ public class TestQuotaThrottle { } private void waitMinuteQuota() { - EnvironmentEdgeManagerTestHelper.injectEdge( - new IncrementingEnvironmentEdge( - EnvironmentEdgeManager.currentTime() + 70000)); + envEdge.incValue(70000); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java index 1927334d8c0..6544c72b320 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java @@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFileBlock; import org.apache.hadoop.hbase.io.hfile.HFileContext; import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; -import org.apache.hadoop.hbase.io.hfile.HFileReaderV2; +import org.apache.hadoop.hbase.io.hfile.HFileReaderImpl; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.compress.CompressionOutputStream; import org.apache.hadoop.io.compress.Compressor; @@ -602,8 +602,9 @@ public class DataBlockEncodingTool { // run the utilities DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName); int majorVersion = reader.getHFileVersion(); - comp.useHBaseChecksum = majorVersion > 2 - || (majorVersion == 2 && reader.getHFileMinorVersion() >= HFileReaderV2.MINOR_VERSION_WITH_CHECKSUM); + comp.useHBaseChecksum = majorVersion > 2 || + (majorVersion == 2 && + reader.getHFileMinorVersion() >= HFileReaderImpl.MINOR_VERSION_WITH_CHECKSUM); comp.checkStatistics(scanner, kvLimit); if (doVerify) { comp.verifyCodecs(scanner, kvLimit); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java index 0d28e544726..67fcf963a88 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java @@ -130,6 +130,11 @@ public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrappe return 99; } 
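The TestQuotaThrottle hunks above swap Thread.sleep and IncrementingEnvironmentEdge for a single ManualEnvironmentEdge injected in @BeforeClass; the test then advances the clock explicitly, for example by 2 * REFRESH_TIME to force a quota cache refresh or by 70 seconds to roll past a per-minute quota window. A small sketch of that time-control idiom, assuming the code under test reads time through EnvironmentEdgeManager:

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
    import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;

    final class ManualClockSketch {  // hypothetical helper, not part of the patch
      static void demo() {
        ManualEnvironmentEdge clock = new ManualEnvironmentEdge();
        clock.setValue(EnvironmentEdgeManager.currentTime());  // start the fake clock at "now"
        EnvironmentEdgeManagerTestHelper.injectEdge(clock);    // callers of EnvironmentEdgeManager now read this clock
        try {
          long before = EnvironmentEdgeManager.currentTime();
          clock.incValue(70000);  // jump 70 s, e.g. past a one-minute quota window
          long after = EnvironmentEdgeManager.currentTime();
          // after - before == 70000, with no Thread.sleep involved
        } finally {
          EnvironmentEdgeManager.reset();  // restore the real clock, as the test's @AfterClass does
        }
      }
    }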
+ @Override + public int getPercentFileLocalSecondaryRegions() { + return 99; + } + @Override public int getCompactionQueueSize() { return 411; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java index 36493cd6ee1..d5e022144d7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/NoOpScanPolicyObserver.java @@ -74,7 +74,7 @@ public class NoOpScanPolicyObserver extends BaseRegionObserver { public KeyValueScanner preStoreScannerOpen(final ObserverContext c, Store store, final Scan scan, final NavigableSet targetCols, KeyValueScanner s) throws IOException { - HRegion r = c.getEnvironment().getRegion(); + Region r = c.getEnvironment().getRegion(); return scan.isReversed() ? new ReversedStoreScanner(store, store.getScanInfo(), scan, targetCols, r.getReadpoint(scan .getIsolationLevel())) : new StoreScanner(store, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java index 192c9898029..66e19523de6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java @@ -60,10 +60,10 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.BinaryComparator; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.io.HeapSize; -import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.wal.WAL; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -80,7 +80,7 @@ public class TestAtomicOperation { static final Log LOG = LogFactory.getLog(TestAtomicOperation.class); @Rule public TestName name = new TestName(); - HRegion region = null; + Region region = null; private HBaseTestingUtility TEST_UTIL = HBaseTestingUtility.createLocalHTU(); // Test names @@ -101,7 +101,7 @@ public class TestAtomicOperation { @After public void teardown() throws IOException { if (region != null) { - region.close(); + ((HRegion)region).close(); region = null; } } @@ -125,11 +125,11 @@ public class TestAtomicOperation { a.setReturnResults(false); a.add(fam1, qual1, Bytes.toBytes(v1)); a.add(fam1, qual2, Bytes.toBytes(v2)); - assertNull(region.append(a)); + assertNull(region.append(a, HConstants.NO_NONCE, HConstants.NO_NONCE)); a = new Append(row); a.add(fam1, qual1, Bytes.toBytes(v2)); a.add(fam1, qual2, Bytes.toBytes(v1)); - Result result = region.append(a); + Result result = region.append(a, HConstants.NO_NONCE, HConstants.NO_NONCE); assertEquals(0, Bytes.compareTo(Bytes.toBytes(v1+v2), result.getValue(fam1, qual1))); assertEquals(0, Bytes.compareTo(Bytes.toBytes(v2+v1), result.getValue(fam1, qual2))); } @@ -216,12 +216,12 @@ public class TestAtomicOperation { */ public static class Incrementer extends Thread { - private final HRegion region; + private final Region region; private final int numIncrements; private final int amount; - public Incrementer(HRegion region, + public Incrementer(Region region, int threadNumber, int 
amount, int numIncrements) { this.region = region; this.numIncrements = numIncrements; @@ -238,7 +238,7 @@ public class TestAtomicOperation { inc.addColumn(fam1, qual2, amount*2); inc.addColumn(fam2, qual3, amount*3); inc.setDurability(Durability.ASYNC_WAL); - region.increment(inc); + region.increment(inc, HConstants.NO_NONCE, HConstants.NO_NONCE); // verify: Make sure we only see completed increments Get g = new Get(row); @@ -276,7 +276,7 @@ public class TestAtomicOperation { a.add(fam1, qual2, val); a.add(fam2, qual3, val); a.setDurability(Durability.ASYNC_WAL); - region.append(a); + region.append(a, HConstants.NO_NONCE, HConstants.NO_NONCE); Get g = new Get(row); Result result = region.get(g); @@ -340,9 +340,9 @@ public class TestAtomicOperation { if (i%10==0) { synchronized(region) { LOG.debug("flushing"); - region.flushcache(); + region.flush(true); if (i%100==0) { - region.compactStores(); + region.compact(false); } } } @@ -433,9 +433,9 @@ public class TestAtomicOperation { if (i%10==0) { synchronized(region) { LOG.debug("flushing"); - region.flushcache(); + region.flush(true); if (i%100==0) { - region.compactStores(); + region.compact(false); } } } @@ -460,13 +460,14 @@ public class TestAtomicOperation { p.add(fam1, qual1, value2); mrm.add(p); } - region.mutateRowsWithLocks(mrm, rowsToLock); + region.mutateRowsWithLocks(mrm, rowsToLock, HConstants.NO_NONCE, HConstants.NO_NONCE); op ^= true; // check: should always see exactly one column Scan s = new Scan(row); RegionScanner rs = region.getScanner(s); List r = new ArrayList(); - while(rs.next(r)); + while (rs.next(r)) + ; rs.close(); if (r.size() != 1) { LOG.debug(r); @@ -499,13 +500,13 @@ public class TestAtomicOperation { } public static class AtomicOperation extends Thread { - protected final HRegion region; + protected final Region region; protected final int numOps; protected final AtomicLong timeStamps; protected final AtomicInteger failures; protected final Random r = new Random(); - public AtomicOperation(HRegion region, int numOps, AtomicLong timeStamps, + public AtomicOperation(Region region, int numOps, AtomicLong timeStamps, AtomicInteger failures) { this.region = region; this.numOps = numOps; @@ -540,14 +541,14 @@ public class TestAtomicOperation { conf.setClass(HConstants.REGION_IMPL, MockHRegion.class, HeapSize.class); HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)) .addFamily(new HColumnDescriptor(family)); - final MockHRegion region = (MockHRegion) TEST_UTIL.createLocalHRegion(htd, null, null); - + final Region region = TEST_UTIL.createLocalHRegion(htd, null, null); + Put[] puts = new Put[1]; Put put = new Put(Bytes.toBytes("r1")); put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("10")); puts[0] = put; - region.batchMutate(puts); + region.batchMutate(puts, HConstants.NO_NONCE, HConstants.NO_NONCE); MultithreadedTestUtil.TestContext ctx = new MultithreadedTestUtil.TestContext(conf); ctx.addThread(new PutThread(ctx, region)); @@ -560,7 +561,8 @@ public class TestAtomicOperation { Scan s = new Scan(); RegionScanner scanner = region.getScanner(s); List results = new ArrayList(); - scanner.next(results, 2); + ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(2).build(); + scanner.next(results, scannerContext); for (Cell keyValue : results) { assertEquals("50",Bytes.toString(CellUtil.cloneValue(keyValue))); } @@ -568,8 +570,8 @@ public class TestAtomicOperation { } private class PutThread extends TestThread { - private MockHRegion region; - 
PutThread(TestContext ctx, MockHRegion region) { + private Region region; + PutThread(TestContext ctx, Region region) { super(ctx); this.region = region; } @@ -580,13 +582,13 @@ public class TestAtomicOperation { put.add(Bytes.toBytes(family), Bytes.toBytes("q1"), Bytes.toBytes("50")); puts[0] = put; testStep = TestStep.PUT_STARTED; - region.batchMutate(puts); + region.batchMutate(puts, HConstants.NO_NONCE, HConstants.NO_NONCE); } } private class CheckAndPutThread extends TestThread { - private MockHRegion region; - CheckAndPutThread(TestContext ctx, MockHRegion region) { + private Region region; + CheckAndPutThread(TestContext ctx, Region region) { super(ctx); this.region = region; } @@ -621,10 +623,10 @@ public class TestAtomicOperation { return new WrappedRowLock(super.getRowLockInternal(row, waitForLock)); } - public class WrappedRowLock extends RowLock { + public class WrappedRowLock extends RowLockImpl { private WrappedRowLock(RowLock rowLock) { - super(rowLock.context); + setContext(((RowLockImpl)rowLock).getContext()); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java index 1ae17ed6ea2..4a3d35236d4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java @@ -69,7 +69,7 @@ public class TestBlocksRead extends HBaseTestCase { return conf; } - HRegion region = null; + Region region = null; private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private final String DIR = TEST_UTIL.getDataTestDir("TestBlocksRead").toString(); @@ -98,7 +98,7 @@ public class TestBlocksRead extends HBaseTestCase { * @throws IOException * @return created and initialized region. */ - private HRegion initHRegion(byte[] tableName, String callingMethod, + private Region initHRegion(byte[] tableName, String callingMethod, HBaseConfiguration conf, String family) throws IOException { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); HColumnDescriptor familyDesc; @@ -112,7 +112,7 @@ public class TestBlocksRead extends HBaseTestCase { HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); Path path = new Path(DIR + callingMethod); - HRegion r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd); + Region r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd); blockCache = new CacheConfig(conf).getBlockCache(); return r; } @@ -234,7 +234,7 @@ public class TestBlocksRead extends HBaseTestCase { putData(FAMILY, "row", "col5", 5); putData(FAMILY, "row", "col6", 6); putData(FAMILY, "row", "col7", 7); - region.flushcache(); + region.flush(true); // Expected block reads: 1 // The top block has the KV we are @@ -271,7 +271,7 @@ public class TestBlocksRead extends HBaseTestCase { } /** - * Test # of blocks read (targetted at some of the cases Lazy Seek optimizes). + * Test # of blocks read (targeted at some of the cases Lazy Seek optimizes). * * @throws Exception */ @@ -287,12 +287,12 @@ public class TestBlocksRead extends HBaseTestCase { // File 1 putData(FAMILY, "row", "col1", 1); putData(FAMILY, "row", "col2", 2); - region.flushcache(); + region.flush(true); // File 2 putData(FAMILY, "row", "col1", 3); putData(FAMILY, "row", "col2", 4); - region.flushcache(); + region.flush(true); // Expected blocks read: 1. 
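The TestAtomicOperation and TestBlocksRead hunks above code against the Region interface rather than the HRegion class: region.flushcache() becomes region.flush(true), region.compactStores() becomes region.compact(...), and batched scanner reads pass a ScannerContext instead of an int limit. A compressed sketch of those call sites, assuming a Region obtained from the testing utility as in these tests:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.regionserver.Region;
    import org.apache.hadoop.hbase.regionserver.RegionScanner;
    import org.apache.hadoop.hbase.regionserver.ScannerContext;

    final class RegionApiSketch {  // hypothetical helper, not part of the patch
      static void flushCompactAndScan(Region region) throws Exception {
        region.flush(true);     // was region.flushcache()
        region.compact(false);  // minor compaction; was region.compactStores()

        // Read at most two cells per next() call via ScannerContext (was scanner.next(results, 2)).
        ScannerContext batchOfTwo = ScannerContext.newBuilder().setBatchLimit(2).build();
        RegionScanner scanner = region.getScanner(new Scan());
        try {
          List<Cell> results = new ArrayList<Cell>();
          boolean more = scanner.next(results, batchOfTwo);
          // 'results' now holds up to two cells; 'more' says whether the scan has more to return.
        } finally {
          scanner.close();
        }
      }
    }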
// File 2's top block is also the KV we are @@ -312,7 +312,7 @@ public class TestBlocksRead extends HBaseTestCase { // File 3: Add another column putData(FAMILY, "row", "col3", 5); - region.flushcache(); + region.flush(true); // Expected blocks read: 1 // File 3's top block has the "col3" KV we are @@ -331,7 +331,7 @@ public class TestBlocksRead extends HBaseTestCase { // File 4: Delete the entire row. deleteFamily(FAMILY, "row", 6); - region.flushcache(); + region.flush(true); // For ROWCOL Bloom filter: Expected blocks read: 2. // For ROW Bloom filter: Expected blocks read: 3. @@ -347,28 +347,28 @@ public class TestBlocksRead extends HBaseTestCase { // File 5: Delete deleteFamily(FAMILY, "row", 10); - region.flushcache(); + region.flush(true); // File 6: some more puts, but with timestamps older than the // previous delete. putData(FAMILY, "row", "col1", 7); putData(FAMILY, "row", "col2", 8); putData(FAMILY, "row", "col3", 9); - region.flushcache(); + region.flush(true); - // Baseline expected blocks read: 8. [HBASE-4532] - kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2", "col3"), 5); + // Baseline expected blocks read: 6. [HBASE-4532] + kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2", "col3"), 6, 7, 7); assertEquals(0, kvs.length); // File 7: Put back new data putData(FAMILY, "row", "col1", 11); putData(FAMILY, "row", "col2", 12); putData(FAMILY, "row", "col3", 13); - region.flushcache(); + region.flush(true); - // Expected blocks read: 5. [HBASE-4585] - kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2", "col3"), 5); + // Expected blocks read: 8. [HBASE-4585, HBASE-13109] + kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2", "col3"), 8, 9, 9); assertEquals(3, kvs.length); verifyData(kvs[0], "row", "col1", 11); verifyData(kvs[1], "row", "col2", 12); @@ -394,7 +394,7 @@ public class TestBlocksRead extends HBaseTestCase { try { putData(FAMILY, "row", "col1", 1); putData(FAMILY, "row", "col2", 2); - region.flushcache(); + region.flush(true); // Execute a scan with caching turned off // Expected blocks stored: 0 @@ -441,7 +441,7 @@ public class TestBlocksRead extends HBaseTestCase { putData(FAMILY, "row", "col" + i, i); } putData(FAMILY, "row", "col99", 201); - region.flushcache(); + region.flush(true); kvs = getData(FAMILY, "row", Arrays.asList("col0"), 2); assertEquals(0, kvs.length); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java index 25330a88d9d..b2ba97c9176 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java @@ -25,14 +25,14 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.CacheStats; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import 
org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Assert; import org.junit.Before; @@ -89,10 +89,9 @@ public class TestBlocksScanned extends HBaseTestCase { } private void _testBlocksScanned(HTableDescriptor table) throws Exception { - HRegion r = createNewHRegion(table, START_KEY, END_KEY, - TEST_UTIL.getConfiguration()); + Region r = createNewHRegion(table, START_KEY, END_KEY, TEST_UTIL.getConfiguration()); addContent(r, FAMILY, COL); - r.flushcache(); + r.flush(true); CacheStats stats = new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache().getStats(); long before = stats.getHitCount() + stats.getMissCount(); @@ -103,7 +102,8 @@ public class TestBlocksScanned extends HBaseTestCase { InternalScanner s = r.getScanner(scan); List results = new ArrayList(); - while (s.next(results)); + while (s.next(results)) + ; s.close(); int expectResultSize = 'z' - 'a'; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java index 15dbef579ae..aa57e2230d4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java @@ -90,7 +90,7 @@ public class TestBulkLoad { private final Expectations callOnce; @Rule public TestName name = new TestName(); - + public TestBulkLoad() throws IOException { callOnce = new Expectations() { { @@ -130,25 +130,26 @@ public class TestBulkLoad { }; context.checking(expection); testRegionWithFamiliesAndSpecifiedTableName(tableName, family1) - .bulkLoadHFiles(familyPaths, false); + .bulkLoadHFiles(familyPaths, false, null); } @Test public void bulkHLogShouldThrowNoErrorAndWriteMarkerWithBlankInput() throws IOException { - testRegionWithFamilies(family1).bulkLoadHFiles(new ArrayList>(), false); + testRegionWithFamilies(family1).bulkLoadHFiles(new ArrayList>(), + false, null); } @Test public void shouldBulkLoadSingleFamilyHLog() throws IOException { context.checking(callOnce); - testRegionWithFamilies(family1).bulkLoadHFiles(withFamilyPathsFor(family1), false); + testRegionWithFamilies(family1).bulkLoadHFiles(withFamilyPathsFor(family1), false, null); } @Test public void shouldBulkLoadManyFamilyHLog() throws IOException { context.checking(callOnce); testRegionWithFamilies(family1, family2).bulkLoadHFiles(withFamilyPathsFor(family1, family2), - false); + false, null); } @Test @@ -156,31 +157,32 @@ public class TestBulkLoad { context.checking(callOnce); TableName tableName = TableName.valueOf("test", "test"); testRegionWithFamiliesAndSpecifiedTableName(tableName, family1, family2) - .bulkLoadHFiles(withFamilyPathsFor(family1, family2), false); + .bulkLoadHFiles(withFamilyPathsFor(family1, family2), false, null); } @Test(expected = DoNotRetryIOException.class) public void shouldCrashIfBulkLoadFamiliesNotInTable() throws IOException { - testRegionWithFamilies(family1).bulkLoadHFiles(withFamilyPathsFor(family1, family2), false); + testRegionWithFamilies(family1).bulkLoadHFiles(withFamilyPathsFor(family1, family2), false, + null); } @Test(expected = DoNotRetryIOException.class) public void bulkHLogShouldThrowErrorWhenFamilySpecifiedAndHFileExistsButNotInTableDescriptor() throws IOException { - testRegionWithFamilies().bulkLoadHFiles(withFamilyPathsFor(family1), false); + testRegionWithFamilies().bulkLoadHFiles(withFamilyPathsFor(family1), false, null); } @Test(expected = 
DoNotRetryIOException.class) public void shouldThrowErrorIfBadFamilySpecifiedAsFamilyPath() throws IOException { testRegionWithFamilies() .bulkLoadHFiles(asList(withInvalidColumnFamilyButProperHFileLocation(family1)), - false); + false, null); } @Test(expected = FileNotFoundException.class) public void shouldThrowErrorIfHFileDoesNotExist() throws IOException { List> list = asList(withMissingHFileForFamily(family1)); - testRegionWithFamilies(family1).bulkLoadHFiles(list, false); + testRegionWithFamilies(family1).bulkLoadHFiles(list, false, null); } private Pair withMissingHFileForFamily(byte[] family) { @@ -233,17 +235,24 @@ public class TestBulkLoad { HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(conf); // TODO We need a way to do this without creating files File hFileLocation = testFolder.newFile(); - hFileFactory.withOutputStream(new FSDataOutputStream(new FileOutputStream(hFileLocation))); - hFileFactory.withFileContext(new HFileContext()); - HFile.Writer writer = hFileFactory.create(); - - writer.append(new KeyValue(CellUtil.createCell(randomBytes, - family, - randomBytes, - 0l, - KeyValue.Type.Put.getCode(), - randomBytes))); - writer.close(); + FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation)); + try { + hFileFactory.withOutputStream(out); + hFileFactory.withFileContext(new HFileContext()); + HFile.Writer writer = hFileFactory.create(); + try { + writer.append(new KeyValue(CellUtil.createCell(randomBytes, + family, + randomBytes, + 0l, + KeyValue.Type.Put.getCode(), + randomBytes))); + } finally { + writer.close(); + } + } finally { + out.close(); + } return hFileLocation.getAbsoluteFile().getAbsolutePath(); } @@ -286,7 +295,7 @@ public class TestBulkLoad { assertNotNull(desc); if (tableName != null) { - assertTrue(Bytes.equals(ProtobufUtil.toTableName(desc.getTableName()).getName(), + assertTrue(Bytes.equals(ProtobufUtil.toTableName(desc.getTableName()).getName(), tableName)); } @@ -296,11 +305,8 @@ public class TestBulkLoad { assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), familyName)); assertTrue(Bytes.equals(Bytes.toBytes(store.getStoreHomeDir()), familyName)); assertEquals(storeFileNames.size(), store.getStoreFileCount()); - for (String storeFile : store.getStoreFileList()) { - assertTrue(storeFile.equals(storeFileNames.get(index++))); - } } - + return true; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java index dc142d6b0c6..b7ebd23865f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java @@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileBlock; -import org.apache.hadoop.hbase.io.hfile.HFileReaderV2; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2; import org.apache.hadoop.hbase.wal.DefaultWALProvider; @@ -221,7 +220,7 @@ public class TestCacheOnWriteInSchema { BlockCache cache = cacheConf.getBlockCache(); StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL); - HFileReaderV2 reader = (HFileReaderV2) sf.createReader().getHFileReader(); + HFile.Reader reader = 
sf.createReader().getHFileReader(); try { // Open a scanner with (on read) caching disabled HFileScanner scanner = reader.getScanner(false, false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java index 7632a41dfd7..1d5c61b387a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java @@ -19,7 +19,8 @@ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.ArrayList; @@ -31,10 +32,17 @@ import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueTestUtil; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; @@ -65,7 +73,7 @@ public class TestColumnSeeking { htd.addFamily(hcd); HRegionInfo info = new HRegionInfo(table, null, null, false); // Set this so that the archiver writes to the temp dir as well. - HRegion region = TEST_UTIL.createLocalHRegion(info, htd); + Region region = TEST_UTIL.createLocalHRegion(info, htd); try { List rows = generateRandomWords(10, "row"); List allColumns = generateRandomWords(10, "column"); @@ -116,17 +124,17 @@ public class TestColumnSeeking { region.put(p); if (Math.random() < flushPercentage) { LOG.info("Flushing... "); - region.flushcache(); + region.flush(true); } if (Math.random() < minorPercentage) { LOG.info("Minor compacting... "); - region.compactStores(false); + region.compact(false); } if (Math.random() < majorPercentage) { LOG.info("Major compacting... "); - region.compactStores(true); + region.compact(true); } } } @@ -177,7 +185,7 @@ public class TestColumnSeeking { htd.addFamily(hcd); HRegionInfo info = new HRegionInfo(table, null, null, false); - HRegion region = TEST_UTIL.createLocalHRegion(info, htd); + Region region = TEST_UTIL.createLocalHRegion(info, htd); List rows = generateRandomWords(10, "row"); List allColumns = generateRandomWords(100, "column"); @@ -229,17 +237,17 @@ public class TestColumnSeeking { region.put(p); if (Math.random() < flushPercentage) { LOG.info("Flushing... "); - region.flushcache(); + region.flush(true); } if (Math.random() < minorPercentage) { LOG.info("Minor compacting... "); - region.compactStores(false); + region.compact(false); } if (Math.random() < majorPercentage) { LOG.info("Major compacting... 
"); - region.compactStores(true); + region.compact(true); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java index 64668adb5db..9d1136e0862 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java @@ -189,7 +189,7 @@ public class TestCompaction { delete.deleteFamily(famAndQf[0]); r.delete(delete); } - r.flushcache(); + r.flush(true); // Multiple versions allowed for an entry, so the delete isn't enough // Lower TTL and expire to ensure that all our entries have been wiped @@ -204,7 +204,7 @@ public class TestCompaction { } Thread.sleep(ttl); - r.compactStores(true); + r.compact(true); assertEquals(0, count()); } } @@ -318,7 +318,7 @@ public class TestCompaction { CountDownLatch latch = new CountDownLatch(numStores); // create some store files and setup requests for each store on which we want to do a // compaction - for (Store store : r.getStores().values()) { + for (Store store : r.getStores()) { createStoreFile(r, store.getColumnFamilyName()); createStoreFile(r, store.getColumnFamilyName()); createStoreFile(r, store.getColumnFamilyName()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java index 3f5f9058be7..d9894a5eef5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionState.java @@ -133,7 +133,7 @@ public class TestCompactionState { ht = TEST_UTIL.createTable(table, families); loadData(ht, families, 3000, flushes); HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0); - List regions = rs.getOnlineRegions(table); + List regions = rs.getOnlineRegions(table); int countBefore = countStoreFilesInFamilies(regions, families); int countBeforeSingleFamily = countStoreFilesInFamily(regions, family); assertTrue(countBefore > 0); // there should be some data files @@ -163,7 +163,7 @@ public class TestCompactionState { // Now, should have the right compaction state, // otherwise, the compaction should have already been done if (expectedState != state) { - for (HRegion region: regions) { + for (Region region: regions) { state = region.getCompactionState(); assertEquals(CompactionState.NONE, state); } @@ -201,13 +201,13 @@ public class TestCompactionState { } private static int countStoreFilesInFamily( - List regions, final byte[] family) { + List regions, final byte[] family) { return countStoreFilesInFamilies(regions, new byte[][]{family}); } - private static int countStoreFilesInFamilies(List regions, final byte[][] families) { + private static int countStoreFilesInFamilies(List regions, final byte[][] families) { int count = 0; - for (HRegion region: regions) { + for (Region region: regions) { count += region.getStoreFileList(families).size(); } return count; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java index 6054cf57ea2..622c14550eb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java @@ -24,7 +24,6 @@ import java.lang.management.MemoryMXBean; import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import java.util.Map; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; @@ -48,9 +47,9 @@ import org.apache.hadoop.hbase.KeyValueTestUtil; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -842,12 +841,16 @@ public class TestDefaultMemStore extends TestCase { this.memstore.upsert(l, 2);// readpoint is 2 long newSize = this.memstore.size.get(); assert(newSize > oldSize); - + //The kv1 should be removed. + assert(memstore.cellSet.size() == 2); + KeyValue kv4 = KeyValueTestUtil.create("r", "f", "q", 104, "v"); kv4.setSequenceId(1); l.clear(); l.add(kv4); this.memstore.upsert(l, 3); assertEquals(newSize, this.memstore.size.get()); + //The kv2 should be removed. + assert(memstore.cellSet.size() == 2); //this.memstore = null; } @@ -919,10 +922,10 @@ public class TestDefaultMemStore extends TestCase { HBaseTestingUtility hbaseUtility = HBaseTestingUtility.createLocalHTU(conf); HRegion region = hbaseUtility.createTestRegion("foobar", new HColumnDescriptor("foo")); - Map stores = region.getStores(); + List stores = region.getStores(); assertTrue(stores.size() == 1); - Store s = stores.entrySet().iterator().next().getValue(); + Store s = stores.iterator().next(); edge.setCurrentTimeMillis(1234); s.add(KeyValueTestUtil.create("r", "f", "q", 100, "v")); edge.setCurrentTimeMillis(1234 + 100); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java index 028e60272f0..6dbcec0662a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDeleteMobTable.java @@ -26,12 +26,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.*; import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.mob.MobUtils; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -88,20 +83,19 @@ public class TestDeleteMobTable { hcd.setMobThreshold(0); htd.addFamily(hcd); HBaseAdmin admin = null; - HTable table = null; + Table table = null; try { - admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); + admin = TEST_UTIL.getHBaseAdmin(); admin.createTable(htd); - table = new HTable(TEST_UTIL.getConfiguration(), tableName); + table = 
ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()).getTable(tn); byte[] value = generateMobValue(10); byte[] row = Bytes.toBytes("row"); Put put = new Put(row); - put.add(FAMILY, QF, EnvironmentEdgeManager.currentTime(), value); + put.addColumn(FAMILY, QF, EnvironmentEdgeManager.currentTime(), value); table.put(put); - table.flushCommits(); - admin.flush(tableName); + admin.flush(tn); // the mob file exists Assert.assertEquals(1, countMobFiles(tn, hcd.getNameAsString())); @@ -134,20 +128,19 @@ public class TestDeleteMobTable { HColumnDescriptor hcd = new HColumnDescriptor(FAMILY); htd.addFamily(hcd); HBaseAdmin admin = null; - HTable table = null; + Table table = null; try { - admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); + admin = TEST_UTIL.getHBaseAdmin(); admin.createTable(htd); - table = new HTable(TEST_UTIL.getConfiguration(), tableName); + table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()).getTable(tn); byte[] value = generateMobValue(10); byte[] row = Bytes.toBytes("row"); Put put = new Put(row); - put.add(FAMILY, QF, EnvironmentEdgeManager.currentTime(), value); + put.addColumn(FAMILY, QF, EnvironmentEdgeManager.currentTime(), value); table.put(put); - table.flushCommits(); - admin.flush(tableName); + admin.flush(tn); table.close(); // the mob file doesn't exist @@ -205,7 +198,7 @@ public class TestDeleteMobTable { return fs.exists(new Path(storePath, fileName)); } - private String assertHasOneMobRow(HTable table, TableName tn, String familyName) + private String assertHasOneMobRow(Table table, TableName tn, String familyName) throws IOException { Scan scan = new Scan(); scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java index af86b4e0621..c4ae2458e39 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java @@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter.Predicate; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.crypto.Encryption; @@ -67,9 +66,11 @@ public class TestEncryptionKeyRotation { SecureRandom rng = new SecureRandom(); byte[] keyBytes = new byte[AES.KEY_LENGTH]; rng.nextBytes(keyBytes); - initialCFKey = new SecretKeySpec(keyBytes, "AES"); + String algorithm = + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + initialCFKey = new SecretKeySpec(keyBytes, algorithm); rng.nextBytes(keyBytes); - secondCFKey = new SecretKeySpec(keyBytes, "AES"); + secondCFKey = new SecretKeySpec(keyBytes, algorithm); } @BeforeClass @@ -95,7 +96,9 @@ public class TestEncryptionKeyRotation { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("default", "testCFKeyRotation")); HColumnDescriptor hcd = new HColumnDescriptor("cf"); - hcd.setEncryptionType("AES"); + String algorithm = + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + hcd.setEncryptionType(algorithm); hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf, 
"hbase", initialCFKey)); htd.addFamily(hcd); @@ -154,7 +157,9 @@ public class TestEncryptionKeyRotation { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("default", "testMasterKeyRotation")); HColumnDescriptor hcd = new HColumnDescriptor("cf"); - hcd.setEncryptionType("AES"); + String algorithm = + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + hcd.setEncryptionType(algorithm); hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf, "hbase", initialCFKey)); htd.addFamily(hcd); @@ -191,9 +196,9 @@ public class TestEncryptionKeyRotation { private static List findStorefilePaths(TableName tableName) throws Exception { List paths = new ArrayList(); - for (HRegion region: + for (Region region: TEST_UTIL.getRSForFirstRegionInTable(tableName).getOnlineRegions(tableName)) { - for (Store store: region.getStores().values()) { + for (Store store: region.getStores()) { for (StoreFile storefile: store.getStorefiles()) { paths.add(storefile.getPath()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java index 29a58a65ee7..0a6b2b57781 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionRandomKeying.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.crypto.Encryption; @@ -54,9 +53,9 @@ public class TestEncryptionRandomKeying { private static List findStorefilePaths(TableName tableName) throws Exception { List paths = new ArrayList(); - for (HRegion region: + for (Region region: TEST_UTIL.getRSForFirstRegionInTable(tableName).getOnlineRegions(htd.getTableName())) { - for (Store store: region.getStores().values()) { + for (Store store: region.getStores()) { for (StoreFile storefile: store.getStorefiles()) { paths.add(storefile.getPath()); } @@ -92,7 +91,9 @@ public class TestEncryptionRandomKeying { // Specify an encryption algorithm without a key htd = new HTableDescriptor(TableName.valueOf("default", "TestEncryptionRandomKeying")); HColumnDescriptor hcd = new HColumnDescriptor("cf"); - hcd.setEncryptionType("AES"); + String algorithm = + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + hcd.setEncryptionType(algorithm); htd.addFamily(hcd); // Start the minicluster diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java index f29601cb8f2..be43950afae 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java @@ -25,10 +25,9 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.Map; -import java.util.NavigableMap; import java.util.Random; import java.util.Set; +import java.util.TreeSet; import 
org.apache.commons.io.IOUtils; import org.apache.commons.logging.Log; @@ -38,19 +37,17 @@ import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.ScheduledChore; -import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.MetaScanner; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; @@ -61,7 +58,6 @@ import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos; -import org.apache.hadoop.hbase.testclassification.FlakeyTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; @@ -77,12 +73,11 @@ import com.google.common.collect.Iterators; import com.google.common.collect.Sets; import com.google.protobuf.ServiceException; -@Category({FlakeyTests.class, LargeTests.class}) -@SuppressWarnings("deprecation") +@Category(LargeTests.class) public class TestEndToEndSplitTransaction { private static final Log LOG = LogFactory.getLog(TestEndToEndSplitTransaction.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final Configuration conf = TEST_UTIL.getConfiguration(); + private static final Configuration CONF = TEST_UTIL.getConfiguration(); @BeforeClass public static void beforeAllTests() throws Exception { @@ -97,71 +92,70 @@ public class TestEndToEndSplitTransaction { @Test public void testMasterOpsWhileSplitting() throws Exception { - TableName tableName = - TableName.valueOf("TestSplit"); + TableName tableName = TableName.valueOf("TestSplit"); byte[] familyName = Bytes.toBytes("fam"); try (HTable ht = TEST_UTIL.createTable(tableName, familyName)) { TEST_UTIL.loadTable(ht, familyName, false); } HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(0); - byte []firstRow = Bytes.toBytes("aaa"); - byte []splitRow = Bytes.toBytes("lll"); - byte []lastRow = Bytes.toBytes("zzz"); - HConnection con = HConnectionManager - .getConnection(TEST_UTIL.getConfiguration()); - // this will also cache the region - byte[] regionName = con.locateRegion(tableName, splitRow).getRegionInfo() - .getRegionName(); - HRegion region = server.getRegion(regionName); - SplitTransaction split = new SplitTransaction(region, splitRow); - split.prepare(); + byte[] firstRow = Bytes.toBytes("aaa"); + byte[] splitRow = Bytes.toBytes("lll"); + byte[] lastRow = Bytes.toBytes("zzz"); + try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { + // this will also cache the region + byte[] 
regionName = conn.getRegionLocator(tableName).getRegionLocation(splitRow) + .getRegionInfo().getRegionName(); + Region region = server.getRegion(regionName); + SplitTransactionImpl split = new SplitTransactionImpl((HRegion) region, splitRow); + split.prepare(); - // 1. phase I - PairOfSameType regions = split.createDaughters(server, server); - assertFalse(test(con, tableName, firstRow, server)); - assertFalse(test(con, tableName, lastRow, server)); + // 1. phase I + PairOfSameType regions = split.createDaughters(server, server); + assertFalse(test(conn, tableName, firstRow, server)); + assertFalse(test(conn, tableName, lastRow, server)); - // passing null as services prevents final step - // 2, most of phase II - split.openDaughters(server, null, regions.getFirst(), regions.getSecond()); - assertFalse(test(con, tableName, firstRow, server)); - assertFalse(test(con, tableName, lastRow, server)); + // passing null as services prevents final step + // 2, most of phase II + split.openDaughters(server, null, regions.getFirst(), regions.getSecond()); + assertFalse(test(conn, tableName, firstRow, server)); + assertFalse(test(conn, tableName, lastRow, server)); - // 3. finish phase II - // note that this replicates some code from SplitTransaction - // 2nd daughter first - server.reportRegionStateTransition( - RegionServerStatusProtos.RegionStateTransition.TransitionCode.SPLIT, - region.getRegionInfo(), regions.getFirst().getRegionInfo(), - regions.getSecond().getRegionInfo()); + // 3. finish phase II + // note that this replicates some code from SplitTransaction + // 2nd daughter first + server.reportRegionStateTransition( + RegionServerStatusProtos.RegionStateTransition.TransitionCode.SPLIT, + region.getRegionInfo(), regions.getFirst().getRegionInfo(), regions.getSecond() + .getRegionInfo()); - // Add to online regions - server.addToOnlineRegions(regions.getSecond()); - // THIS is the crucial point: - // the 2nd daughter was added, so querying before the split key should fail. - assertFalse(test(con, tableName, firstRow, server)); - // past splitkey is ok. - assertTrue(test(con, tableName, lastRow, server)); + // Add to online regions + server.addToOnlineRegions(regions.getSecond()); + // THIS is the crucial point: + // the 2nd daughter was added, so querying before the split key should fail. + assertFalse(test(conn, tableName, firstRow, server)); + // past splitkey is ok. + assertTrue(test(conn, tableName, lastRow, server)); - // Add to online regions - server.addToOnlineRegions(regions.getFirst()); - assertTrue(test(con, tableName, firstRow, server)); - assertTrue(test(con, tableName, lastRow, server)); + // Add to online regions + server.addToOnlineRegions(regions.getFirst()); + assertTrue(test(conn, tableName, firstRow, server)); + assertTrue(test(conn, tableName, lastRow, server)); - assertTrue(test(con, tableName, firstRow, server)); - assertTrue(test(con, tableName, lastRow, server)); + assertTrue(test(conn, tableName, firstRow, server)); + assertTrue(test(conn, tableName, lastRow, server)); + } } /** * attempt to locate the region and perform a get and scan * @return True if successful, False otherwise. 
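The rewritten testMasterOpsWhileSplitting above drops the cached HConnectionManager connection in favour of a Connection that is created and closed explicitly. A minimal sketch of the lookup it now performs, under the assumption of a running cluster and reusing the placeholder table "TestSplit" and split row "lll" from the test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Connections are created and closed by the caller instead of being
    // fetched from the process-wide HConnectionManager cache.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestSplit"))) {
      byte[] regionName = locator.getRegionLocation(Bytes.toBytes("lll"))
          .getRegionInfo().getRegionName();
      System.out.println(Bytes.toStringBinary(regionName));
    }
  }
}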
*/ - private boolean test(HConnection con, TableName tableName, byte[] row, + private boolean test(Connection conn, TableName tableName, byte[] row, HRegionServer server) { // not using HTable to avoid timeouts and retries try { - byte[] regionName = con.relocateRegion(tableName, row).getRegionInfo() - .getRegionName(); + byte[] regionName = conn.getRegionLocator(tableName).getRegionLocation(row, true) + .getRegionInfo().getRegionName(); // get and scan should now succeed without exception ClientProtos.GetRequest request = RequestConverter.buildGetRequest(regionName, new Get(row)); @@ -174,7 +168,7 @@ public class TestEndToEndSplitTransaction { } catch (ServiceException se) { throw ProtobufUtil.getRemoteException(se); } - } catch (IOException x) { + } catch (IOException e) { return false; } catch (ServiceException e) { return false; @@ -198,7 +192,7 @@ public class TestEndToEndSplitTransaction { Stoppable stopper = new StoppableImplementation(); RegionSplitter regionSplitter = new RegionSplitter(table); - RegionChecker regionChecker = new RegionChecker(conf, stopper, TABLENAME); + RegionChecker regionChecker = new RegionChecker(CONF, stopper, TABLENAME); final ChoreService choreService = new ChoreService("TEST_SERVER"); choreService.scheduleChore(regionChecker); @@ -209,11 +203,11 @@ public class TestEndToEndSplitTransaction { stopper.stop(null); if (regionChecker.ex != null) { - throw regionChecker.ex; + throw new AssertionError("regionChecker", regionChecker.ex); } if (regionSplitter.ex != null) { - throw regionSplitter.ex; + throw new AssertionError("regionSplitter", regionSplitter.ex); } //one final check @@ -243,15 +237,15 @@ public class TestEndToEndSplitTransaction { try { Random random = new Random(); for (int i= 0; i< 5; i++) { - NavigableMap regions = - MetaScanner.allTableRegions(connection, tableName); + List regions = + MetaTableAccessor.getTableRegions(connection, tableName, true); if (regions.size() == 0) { continue; } int regionIndex = random.nextInt(regions.size()); //pick a random region and split it into two - HRegionInfo region = Iterators.get(regions.keySet().iterator(), regionIndex); + HRegionInfo region = Iterators.get(regions.iterator(), regionIndex); //pick the mid split point int start = 0, end = Integer.MAX_VALUE; @@ -275,7 +269,7 @@ public class TestEndToEndSplitTransaction { try { admin.splitRegion(region.getRegionName(), splitPoint); //wait until the split is complete - blockUntilRegionSplit(conf, 50000, region.getRegionName(), true); + blockUntilRegionSplit(CONF, 50000, region.getRegionName(), true); } catch (NotServingRegionException ex) { //ignore @@ -290,7 +284,7 @@ public class TestEndToEndSplitTransaction { List puts = new ArrayList<>(); for (int i=start; i< start + 100; i++) { Put put = new Put(Bytes.toBytes(i)); - put.add(family, family, Bytes.toBytes(i)); + put.addColumn(family, family, Bytes.toBytes(i)); puts.add(put); } table.put(puts); @@ -298,7 +292,7 @@ public class TestEndToEndSplitTransaction { } /** - * Checks regions using MetaScanner, MetaTableAccessor and HTable methods + * Checks regions using MetaTableAccessor and HTable methods */ static class RegionChecker extends ScheduledChore { Connection connection; @@ -315,15 +309,10 @@ public class TestEndToEndSplitTransaction { } /** verify region boundaries obtained from MetaScanner */ - void verifyRegionsUsingMetaScanner() throws Exception { - - //MetaScanner.allTableRegions() - NavigableMap regions = MetaScanner.allTableRegions(connection, - tableName); - verifyTableRegions(regions.keySet()); 
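RegionSplitter and RegionChecker above now enumerate a table's regions through MetaTableAccessor instead of the removed MetaScanner. A rough sketch of that call, reusing the getTableRegions(connection, tableName, excludeOfflinedSplitParents) signature seen in this patch; the table name is a placeholder:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MetaRegionListSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // true = skip offlined split parents, matching the tests above.
      List<HRegionInfo> regions =
          MetaTableAccessor.getTableRegions(conn, TableName.valueOf("TestSplit"), true);
      for (HRegionInfo region : regions) {
        System.out.println(region.getRegionNameAsString());
      }
    }
  }
}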
- - //MetaScanner.listAllRegions() - List regionList = MetaScanner.listAllRegions(conf, connection, false); + void verifyRegionsUsingMetaTableAccessor() throws Exception { + List regionList = MetaTableAccessor.getTableRegions(connection, tableName, true); + verifyTableRegions(Sets.newTreeSet(regionList)); + regionList = MetaTableAccessor.getAllRegions(connection, true); verifyTableRegions(Sets.newTreeSet(regionList)); } @@ -333,19 +322,22 @@ public class TestEndToEndSplitTransaction { try { //HTable.getStartEndKeys() table = (HTable) connection.getTable(tableName); - Pair keys = table.getStartEndKeys(); + Pair keys = table.getRegionLocator().getStartEndKeys(); verifyStartEndKeys(keys); //HTable.getRegionsInfo() - Map regions = table.getRegionLocations(); - verifyTableRegions(regions.keySet()); + Set regions = new TreeSet(); + for (HRegionLocation loc : table.getRegionLocator().getAllRegionLocations()) { + regions.add(loc.getRegionInfo()); + } + verifyTableRegions(regions); } finally { IOUtils.closeQuietly(table); } } void verify() throws Exception { - verifyRegionsUsingMetaScanner(); + verifyRegionsUsingMetaTableAccessor(); verifyRegionsUsingHTable(); } @@ -411,7 +403,7 @@ public class TestEndToEndSplitTransaction { admin.flushRegion(regionName); log("blocking until flush is complete: " + Bytes.toStringBinary(regionName)); Threads.sleepWithoutInterrupt(500); - while (rs.cacheFlusher.getFlushQueueSize() > 0) { + while (rs.getOnlineRegion(regionName).getMemstoreSize() > 0) { Threads.sleep(50); } } @@ -422,8 +414,14 @@ public class TestEndToEndSplitTransaction { admin.majorCompactRegion(regionName); log("blocking until compaction is complete: " + Bytes.toStringBinary(regionName)); Threads.sleepWithoutInterrupt(500); - while (rs.compactSplitThread.getCompactionQueueSize() > 0) { - Threads.sleep(50); + outer: for (;;) { + for (Store store : rs.getOnlineRegion(regionName).getStores()) { + if (store.getStorefilesCount() > 1) { + Threads.sleep(50); + continue outer; + } + } + break; } } @@ -434,22 +432,20 @@ public class TestEndToEndSplitTransaction { long start = System.currentTimeMillis(); log("blocking until region is split:" + Bytes.toStringBinary(regionName)); HRegionInfo daughterA = null, daughterB = null; - Connection connection = ConnectionFactory.createConnection(conf); - Table metaTable = connection.getTable(TableName.META_TABLE_NAME); - - try { + try (Connection conn = ConnectionFactory.createConnection(conf); + Table metaTable = conn.getTable(TableName.META_TABLE_NAME)) { Result result = null; HRegionInfo region = null; while ((System.currentTimeMillis() - start) < timeout) { - result = getRegionRow(metaTable, regionName); + result = metaTable.get(new Get(regionName)); if (result == null) { break; } - region = HRegionInfo.getHRegionInfo(result); + region = MetaTableAccessor.getHRegionInfo(result); if (region.isSplitParent()) { log("found parent region: " + region.toString()); - PairOfSameType pair = HRegionInfo.getDaughterRegions(result); + PairOfSameType pair = MetaTableAccessor.getDaughterRegions(result); daughterA = pair.getFirst(); daughterB = pair.getSecond(); break; @@ -465,10 +461,10 @@ public class TestEndToEndSplitTransaction { //if we are here, this means the region split is complete or timed out if (waitForDaughters) { long rem = timeout - (System.currentTimeMillis() - start); - blockUntilRegionIsInMeta(metaTable, rem, daughterA); + blockUntilRegionIsInMeta(conn, rem, daughterA); rem = timeout - (System.currentTimeMillis() - start); - blockUntilRegionIsInMeta(metaTable, rem, 
daughterB); + blockUntilRegionIsInMeta(conn, rem, daughterB); rem = timeout - (System.currentTimeMillis() - start); blockUntilRegionIsOpened(conf, rem, daughterA); @@ -476,29 +472,18 @@ public class TestEndToEndSplitTransaction { rem = timeout - (System.currentTimeMillis() - start); blockUntilRegionIsOpened(conf, rem, daughterB); } - } finally { - IOUtils.closeQuietly(metaTable); - IOUtils.closeQuietly(connection); } } - public static Result getRegionRow(Table metaTable, byte[] regionName) throws IOException { - Get get = new Get(regionName); - return metaTable.get(get); - } - - public static void blockUntilRegionIsInMeta(Table metaTable, long timeout, HRegionInfo hri) + public static void blockUntilRegionIsInMeta(Connection conn, long timeout, HRegionInfo hri) throws IOException, InterruptedException { log("blocking until region is in META: " + hri.getRegionNameAsString()); long start = System.currentTimeMillis(); while (System.currentTimeMillis() - start < timeout) { - Result result = getRegionRow(metaTable, hri.getRegionName()); - if (result != null) { - HRegionInfo info = HRegionInfo.getHRegionInfo(result); - if (info != null && !info.isOffline()) { - log("found region in META: " + hri.getRegionNameAsString()); - break; - } + HRegionLocation loc = MetaTableAccessor.getRegionLocation(conn, hri); + if (loc != null && !loc.getRegionInfo().isOffline()) { + log("found region in META: " + hri.getRegionNameAsString()); + break; } Threads.sleep(10); } @@ -508,26 +493,21 @@ public class TestEndToEndSplitTransaction { throws IOException, InterruptedException { log("blocking until region is opened for reading:" + hri.getRegionNameAsString()); long start = System.currentTimeMillis(); - Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(hri.getTable()); - - try { - byte [] row = hri.getStartKey(); - // Check for null/empty row. If we find one, use a key that is likely to be in first region. - if (row == null || row.length <= 0) row = new byte [] {'0'}; + try (Connection conn = ConnectionFactory.createConnection(conf); + Table table = conn.getTable(hri.getTable())) { + byte[] row = hri.getStartKey(); + // Check for null/empty row. If we find one, use a key that is likely to be in first region. 
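blockUntilRegionIsOpened above now scopes its Connection and Table in try-with-resources rather than closing them with IOUtils.closeQuietly. A stripped-down sketch of the same probe loop, with the timeout bookkeeping omitted and placeholder table and row values:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionProbeSketch {
  // Retries a single Get until the hosting region answers; both resources are
  // closed automatically when the try block exits.
  static void waitUntilReadable(Configuration conf, TableName tableName, byte[] row)
      throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tableName)) {
      Get get = new Get(row);
      while (true) {
        try {
          table.get(get);
          return;
        } catch (IOException ex) {
          Thread.sleep(10); // region not readable yet, try again
        }
      }
    }
  }

  public static void main(String[] args) throws Exception {
    waitUntilReadable(HBaseConfiguration.create(),
        TableName.valueOf("TestSplit"), Bytes.toBytes("0"));
  }
}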
+ if (row == null || row.length <= 0) row = new byte[] { '0' }; Get get = new Get(row); while (System.currentTimeMillis() - start < timeout) { try { table.get(get); break; - } catch(IOException ex) { - //wait some more + } catch (IOException ex) { + // wait some more } Threads.sleep(10); } - } finally { - IOUtils.closeQuietly(table); - IOUtils.closeQuietly(connection); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java index 72d7aa9cbbf..020781c927b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestExplicitColumnTracker.java @@ -48,9 +48,9 @@ public class TestExplicitColumnTracker { private void runTest(int maxVersions, TreeSet trackColumns, List scannerColumns, - List expected, int lookAhead) throws IOException { + List expected) throws IOException { ColumnTracker exp = new ExplicitColumnTracker( - trackColumns, 0, maxVersions, Long.MIN_VALUE, lookAhead); + trackColumns, 0, maxVersions, Long.MIN_VALUE); //Initialize result @@ -92,7 +92,7 @@ public class TestExplicitColumnTracker { scanner.add(col4); scanner.add(col5); - runTest(maxVersions, columns, scanner, expected, 0); + runTest(maxVersions, columns, scanner, expected); } @Test @@ -144,59 +144,7 @@ public class TestExplicitColumnTracker { scanner.add(col5); //Initialize result - runTest(maxVersions, columns, scanner, expected, 0); - } - - @Test - public void testGet_MultiVersionWithLookAhead() throws IOException{ - //Create tracker - TreeSet columns = new TreeSet(Bytes.BYTES_COMPARATOR); - //Looking for every other - columns.add(col2); - columns.add(col4); - - List expected = new ArrayList(); - expected.add(ScanQueryMatcher.MatchCode.SKIP); - expected.add(ScanQueryMatcher.MatchCode.SKIP); - expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL); - - expected.add(ScanQueryMatcher.MatchCode.INCLUDE); // col2; 1st version - expected.add(ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_COL); // col2; 2nd version - expected.add(ScanQueryMatcher.MatchCode.SKIP); - - expected.add(ScanQueryMatcher.MatchCode.SKIP); - expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL); - expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL); - - expected.add(ScanQueryMatcher.MatchCode.INCLUDE); // col4; 1st version - expected.add(ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_ROW); // col4; 2nd version - expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW); - - expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW); - expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW); - expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_ROW); - int maxVersions = 2; - - //Create "Scanner" - List scanner = new ArrayList(); - scanner.add(col1); - scanner.add(col1); - scanner.add(col1); - scanner.add(col2); - scanner.add(col2); - scanner.add(col2); - scanner.add(col3); - scanner.add(col3); - scanner.add(col3); - scanner.add(col4); - scanner.add(col4); - scanner.add(col4); - scanner.add(col5); - scanner.add(col5); - scanner.add(col5); - - //Initialize result - runTest(maxVersions, columns, scanner, expected, 2); + runTest(maxVersions, columns, scanner, expected); } /** @@ -211,7 +159,7 @@ public class TestExplicitColumnTracker { } ColumnTracker explicit = new ExplicitColumnTracker(columns, 0, maxVersions, - Long.MIN_VALUE, 0); + Long.MIN_VALUE); for (int i = 0; i < 100000; i+=2) { 
byte [] col = Bytes.toBytes("col"+i); ScanQueryMatcher.checkColumn(explicit, col, 0, col.length, 1, KeyValue.Type.Put.getCode(), @@ -240,7 +188,7 @@ public class TestExplicitColumnTracker { new ScanQueryMatcher.MatchCode[] { ScanQueryMatcher.MatchCode.SEEK_NEXT_COL, ScanQueryMatcher.MatchCode.SEEK_NEXT_COL }); - runTest(1, columns, scanner, expected, 0); + runTest(1, columns, scanner, expected); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index 0d7820fe171..110cd365de4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -34,16 +34,17 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.wal.WAL; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -74,7 +75,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { // Up flush size else we bind up when we use default catalog flush of 16k. fsTableDescriptors.get(TableName.META_TABLE_NAME).setMemStoreFlushSize(64 * 1024 * 1024); - HRegion mr = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, + Region mr = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, rootdir, this.conf, fsTableDescriptors.get(TableName.META_TABLE_NAME)); try { // Write rows for three tables 'A', 'B', and 'C'. @@ -95,7 +96,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { InternalScanner s = mr.getScanner(new Scan()); try { List keys = new ArrayList(); - while(s.next(keys)) { + while (s.next(keys)) { LOG.info(keys); keys.clear(); } @@ -106,7 +107,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { findRow(mr, 'C', 45, 44); findRow(mr, 'C', 46, 46); findRow(mr, 'C', 43, 42); - mr.flushcache(); + mr.flush(true); findRow(mr, 'C', 44, 44); findRow(mr, 'C', 45, 44); findRow(mr, 'C', 46, 46); @@ -119,7 +120,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { s = mr.getScanner(scan); try { List keys = new ArrayList(); - while (s.next(keys)) { + while (s.next(keys)) { mr.delete(new Delete(CellUtil.cloneRow(keys.get(0)))); keys.clear(); } @@ -131,7 +132,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { findRow(mr, 'C', 45, -1); findRow(mr, 'C', 46, -1); findRow(mr, 'C', 43, -1); - mr.flushcache(); + mr.flush(true); findRow(mr, 'C', 44, -1); findRow(mr, 'C', 45, -1); findRow(mr, 'C', 46, -1); @@ -149,7 +150,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { * @return Row found. 
* @throws IOException */ - private byte [] findRow(final HRegion mr, final char table, + private byte [] findRow(final Region mr, final char table, final int rowToFind, final int answer) throws IOException { TableName tableb = TableName.valueOf("" + table); @@ -159,7 +160,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { tableb, tofindBytes, HConstants.NINES, false); LOG.info("find=" + new String(metaKey)); - Result r = mr.getClosestRowBefore(metaKey); + Result r = mr.getClosestRowBefore(metaKey, HConstants.CATALOG_FAMILY); if (answer == -1) { assertNull(r); return null; @@ -186,7 +187,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { */ @Test public void testGetClosestRowBefore3() throws IOException{ - HRegion region = null; + Region region = null; byte [] c0 = COLUMNS[0]; byte [] c1 = COLUMNS[1]; try { @@ -231,7 +232,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { r = region.getClosestRowBefore(T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); - region.flushcache(); + region.flush(true); // try finding "010" after flush r = region.getClosestRowBefore(T30, c0); @@ -249,7 +250,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { r = region.getClosestRowBefore(T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); - region.flushcache(); + region.flush(true); r = region.getClosestRowBefore(T30, c0); assertTrue(Bytes.equals(T10, r.getRow())); @@ -267,7 +268,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { // Ask for a value off the end of the file. Should return t10. r = region.getClosestRowBefore(T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); - region.flushcache(); + region.flush(true); r = region.getClosestRowBefore(T31, c0); assertTrue(Bytes.equals(T10, r.getRow())); @@ -283,11 +284,12 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { } finally { if (region != null) { try { - region.close(); + WAL wal = ((HRegion)region).getWAL(); + ((HRegion)region).close(); + wal.close(); } catch (Exception e) { e.printStackTrace(); } - region.getWAL().close(); } } } @@ -295,7 +297,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { /** For HBASE-694 */ @Test public void testGetClosestRowBefore2() throws IOException{ - HRegion region = null; + Region region = null; byte [] c0 = COLUMNS[0]; try { HTableDescriptor htd = createTableDescriptor(getName()); @@ -317,7 +319,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { Result r = region.getClosestRowBefore(T35, c0); assertTrue(Bytes.equals(T30, r.getRow())); - region.flushcache(); + region.flush(true); // try finding "035" r = region.getClosestRowBefore(T35, c0); @@ -331,7 +333,7 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { r = region.getClosestRowBefore(T35, c0); assertTrue(Bytes.equals(T30, r.getRow())); - region.flushcache(); + region.flush(true); // try finding "035" r = region.getClosestRowBefore(T35, c0); @@ -339,11 +341,12 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { } finally { if (region != null) { try { - region.close(); + WAL wal = ((HRegion)region).getWAL(); + ((HRegion)region).close(); + wal.close(); } catch (Exception e) { e.printStackTrace(); } - region.getWAL().close(); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java index 3a0f9be2be6..39fd410a6b2 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHMobStore.java @@ -167,7 +167,7 @@ public class TestHMobStore { KeyValue[] keys = new KeyValue[] { key1, key2, key3 }; int maxKeyCount = keys.length; StoreFile.Writer mobWriter = store.createWriterInTmp(currentDate, maxKeyCount, - hcd.getCompactionCompression(), region.getStartKey()); + hcd.getCompactionCompression(), region.getRegionInfo().getStartKey()); mobFilePath = mobWriter.getPath(); mobWriter.append(key1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index ea063469f30..6abe0760604 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -51,6 +51,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.NavigableMap; @@ -61,6 +62,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -78,8 +80,6 @@ import org.apache.hadoop.hbase.HBaseTestCase; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.RegionTooBusyException; -import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.HRegionInfo; @@ -90,8 +90,10 @@ import org.apache.hadoop.hbase.MultithreadedTestUtil; import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread; import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread; import org.apache.hadoop.hbase.NotServingRegionException; +import org.apache.hadoop.hbase.RegionTooBusyException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Append; @@ -121,29 +123,23 @@ import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor; -import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor; import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; -import org.apache.hadoop.hbase.regionserver.HRegion.RowLock; +import org.apache.hadoop.hbase.regionserver.Region.RowLock; import 
org.apache.hadoop.hbase.regionserver.TestStore.FaultyFileSystem; +import org.apache.hadoop.hbase.regionserver.handler.FinishRegionRecoveringHandler; +import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL; import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource; -import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener; -import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; -import org.apache.hadoop.hbase.wal.DefaultWALProvider; -import org.apache.hadoop.hbase.wal.FaultyFSLog; -import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALFactory; -import org.apache.hadoop.hbase.wal.WALKey; -import org.apache.hadoop.hbase.wal.WALProvider; -import org.apache.hadoop.hbase.wal.WALSplitter; +import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.test.MetricsAssertHelper; -import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -152,6 +148,13 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; import org.apache.hadoop.hbase.util.PairOfSameType; import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.wal.DefaultWALProvider; +import org.apache.hadoop.hbase.wal.FaultyFSLog; +import org.apache.hadoop.hbase.wal.WAL; +import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALKey; +import org.apache.hadoop.hbase.wal.WALProvider; +import org.apache.hadoop.hbase.wal.WALSplitter; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -165,6 +168,7 @@ import org.mockito.Mockito; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import com.google.protobuf.ByteString; /** @@ -173,7 +177,7 @@ import com.google.protobuf.ByteString; * A lot of the meta information for an HRegion now lives inside other HRegions * or in the HBaseMaster, so only basic testing is possible. */ -@Category({VerySlowRegionServerTests.class, MediumTests.class}) +@Category({VerySlowRegionServerTests.class, LargeTests.class}) @SuppressWarnings("deprecation") public class TestHRegion { // Do not spin up clusters in here. If you need to spin up a cluster, do it @@ -255,7 +259,7 @@ public class TestHRegion { region.put(put); // Close with something in memstore and something in the snapshot. Make sure all is cleared. region.close(); - assertEquals(0, region.getMemstoreSize().get()); + assertEquals(0, region.getMemstoreSize()); HBaseTestingUtility.closeRegionAndWAL(region); } @@ -366,17 +370,17 @@ public class TestHRegion { // Initialize region region = initHRegion(tableName, null, null, callingMethod, conf, false, Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); - long size = region.getMemstoreSize().get(); + long size = region.getMemstoreSize(); Assert.assertEquals(0, size); // Put one item into memstore. Measure the size of one item in memstore. 
Put p1 = new Put(row); p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[])null)); region.put(p1); - final long sizeOfOnePut = region.getMemstoreSize().get(); + final long sizeOfOnePut = region.getMemstoreSize(); // Fail a flush which means the current memstore will hang out as memstore 'snapshot'. try { LOG.info("Flushing"); - region.flushcache(); + region.flush(true); Assert.fail("Didn't bubble up IOE!"); } catch (DroppedSnapshotException dse) { // What we are expecting @@ -384,20 +388,20 @@ public class TestHRegion { // Make it so all writes succeed from here on out ffs.fault.set(false); // Check sizes. Should still be the one entry. - Assert.assertEquals(sizeOfOnePut, region.getMemstoreSize().get()); + Assert.assertEquals(sizeOfOnePut, region.getMemstoreSize()); // Now add two entries so that on this next flush that fails, we can see if we // subtract the right amount, the snapshot size only. Put p2 = new Put(row); p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual2, 2, (byte[])null)); p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[])null)); region.put(p2); - Assert.assertEquals(sizeOfOnePut * 3, region.getMemstoreSize().get()); + Assert.assertEquals(sizeOfOnePut * 3, region.getMemstoreSize()); // Do a successful flush. It will clear the snapshot only. Thats how flushes work. // If already a snapshot, we clear it else we move the memstore to be snapshot and flush // it - region.flushcache(); + region.flush(true); // Make sure our memory accounting is right. - Assert.assertEquals(sizeOfOnePut * 2, region.getMemstoreSize().get()); + Assert.assertEquals(sizeOfOnePut * 2, region.getMemstoreSize()); } finally { HBaseTestingUtility.closeRegionAndWAL(region); } @@ -430,7 +434,7 @@ public class TestHRegion { // Initialize region region = initHRegion(tableName, null, null, callingMethod, conf, false, Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); - long size = region.getMemstoreSize().get(); + long size = region.getMemstoreSize(); Assert.assertEquals(0, size); // Put one item into memstore. Measure the size of one item in memstore. 
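Throughout this file the old flushcache() entry point is rewritten to flush(boolean), which returns a result object the tests can interrogate. A hedged sketch of that idiom, modeled on the calls above and assuming a test HRegion with at least one pending edit:

  // flush(true) forces a flush and reports whether it ran and whether a compaction is now advisable.
  HRegion.FlushResult fr = region.flush(true);
  if (fr.isFlushSucceeded() && fr.isCompactionNeeded()) {
    // Mirrors the pattern used later in this class: follow the flush with a compaction when asked.
    region.compact(false);
  }
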
Put p1 = new Put(row); @@ -470,7 +474,7 @@ public class TestHRegion { Put put = new Put(Bytes.toBytes("r1")); put.add(family, Bytes.toBytes("q1"), Bytes.toBytes("v1")); region.put(put); - region.flushcache(); + region.flush(true); Scan scan = new Scan(); scan.setMaxVersions(3); @@ -479,7 +483,7 @@ public class TestHRegion { Delete delete = new Delete(Bytes.toBytes("r1")); region.delete(delete); - region.flushcache(); + region.flush(true); // open the second scanner RegionScanner scanner2 = region.getScanner(scan); @@ -489,7 +493,7 @@ public class TestHRegion { System.out.println("Smallest read point:" + region.getSmallestReadPoint()); // make a major compaction - region.compactStores(true); + region.compact(true); // open the third scanner RegionScanner scanner3 = region.getScanner(scan); @@ -521,7 +525,7 @@ public class TestHRegion { put = new Put(Bytes.toBytes("r2")); put.add(family, Bytes.toBytes("q1"), Bytes.toBytes("v1")); region.put(put); - region.flushcache(); + region.flush(true); Scan scan = new Scan(); scan.setMaxVersions(3); @@ -530,7 +534,7 @@ public class TestHRegion { System.out.println("Smallest read point:" + region.getSmallestReadPoint()); - region.compactStores(true); + region.compact(true); scanner1.reseek(Bytes.toBytes("r2")); List results = new ArrayList(); @@ -573,7 +577,7 @@ public class TestHRegion { } MonitoredTask status = TaskMonitor.get().createStatus(method); Map maxSeqIdInStores = new TreeMap(Bytes.BYTES_COMPARATOR); - for (Store store : region.getStores().values()) { + for (Store store : region.getStores()) { maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId - 1); } long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status); @@ -627,7 +631,7 @@ public class TestHRegion { long recoverSeqId = 1030; MonitoredTask status = TaskMonitor.get().createStatus(method); Map maxSeqIdInStores = new TreeMap(Bytes.BYTES_COMPARATOR); - for (Store store : region.getStores().values()) { + for (Store store : region.getStores()) { maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), recoverSeqId - 1); } long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status); @@ -672,7 +676,7 @@ public class TestHRegion { dos.close(); Map maxSeqIdInStores = new TreeMap(Bytes.BYTES_COMPARATOR); - for (Store store : region.getStores().values()) { + for (Store store : region.getStores()) { maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId); } long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, null); @@ -694,9 +698,9 @@ public class TestHRegion { Path regiondir = region.getRegionFileSystem().getRegionDir(); FileSystem fs = region.getRegionFileSystem().getFileSystem(); byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes(); + byte[][] columns = region.getTableDesc().getFamiliesKeys().toArray(new byte[0][]); - assertEquals(0, region.getStoreFileList( - region.getStores().keySet().toArray(new byte[0][])).size()); + assertEquals(0, region.getStoreFileList(columns).size()); Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir); @@ -732,15 +736,14 @@ public class TestHRegion { long recoverSeqId = 1030; Map maxSeqIdInStores = new TreeMap(Bytes.BYTES_COMPARATOR); MonitoredTask status = TaskMonitor.get().createStatus(method); - for (Store store : region.getStores().values()) { + for (Store store : region.getStores()) { maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), recoverSeqId - 1); } long seqId = 
region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status); assertEquals(maxSeqId, seqId); // assert that the files are flushed - assertEquals(1, region.getStoreFileList( - region.getStores().keySet().toArray(new byte[0][])).size()); + assertEquals(1, region.getStoreFileList(columns).size()); } finally { HBaseTestingUtility.closeRegionAndWAL(this.region); @@ -768,7 +771,7 @@ public class TestHRegion { Put put = new Put(Bytes.toBytes(i)); put.add(family, Bytes.toBytes(i), Bytes.toBytes(i)); region.put(put); - region.flushcache(); + region.flush(true); } // this will create a region with 3 files @@ -870,7 +873,7 @@ public class TestHRegion { Put put = new Put(Bytes.toBytes(i)); put.add(family, Bytes.toBytes(i), Bytes.toBytes(i)); region.put(put); - region.flushcache(); + region.flush(true); } // this will create a region with 3 files from flush @@ -1034,7 +1037,7 @@ public class TestHRegion { // start cache flush will throw exception try { - region.flushcache(); + region.flush(true); fail("This should have thrown exception"); } catch (DroppedSnapshotException unexpected) { // this should not be a dropped snapshot exception. Meaning that RS will not abort @@ -1047,7 +1050,7 @@ public class TestHRegion { isFlushWALMarker.set(FlushAction.COMMIT_FLUSH); try { - region.flushcache(); + region.flush(true); fail("This should have thrown exception"); } catch (DroppedSnapshotException expected) { // we expect this exception, since we were able to write the snapshot, but failed to @@ -1067,7 +1070,7 @@ public class TestHRegion { isFlushWALMarker.set(FlushAction.COMMIT_FLUSH, FlushAction.ABORT_FLUSH); try { - region.flushcache(); + region.flush(true); fail("This should have thrown exception"); } catch (DroppedSnapshotException expected) { // we expect this exception, since we were able to write the snapshot, but failed to @@ -2438,7 +2441,7 @@ public class TestHRegion { this.region = initHRegion(tableName, method, hc, families); try { LOG.info("" + HBaseTestCase.addContent(region, fam3)); - region.flushcache(); + region.flush(true); region.compactStores(); byte[] splitRow = region.checkSplit(); assertNotNull(splitRow); @@ -2485,8 +2488,8 @@ public class TestHRegion { * @throws IOException */ HRegion[] splitRegion(final HRegion parent, final byte[] midkey) throws IOException { - PairOfSameType result = null; - SplitTransaction st = new SplitTransaction(parent, midkey); + PairOfSameType result = null; + SplitTransactionImpl st = new SplitTransactionImpl(parent, midkey); // If prepare does not return true, for some reason -- logged inside in // the prepare call -- we are not ready to split just now. Just return. if (!st.prepare()) { @@ -2497,21 +2500,22 @@ public class TestHRegion { result = st.execute(null, null); } catch (IOException ioe) { try { - LOG.info("Running rollback of failed split of " + parent.getRegionNameAsString() + "; " - + ioe.getMessage()); + LOG.info("Running rollback of failed split of " + + parent.getRegionInfo().getRegionNameAsString() + "; " + ioe.getMessage()); st.rollback(null, null); - LOG.info("Successful rollback of failed split of " + parent.getRegionNameAsString()); + LOG.info("Successful rollback of failed split of " + + parent.getRegionInfo().getRegionNameAsString()); return null; } catch (RuntimeException e) { // If failed rollback, kill this server to avoid having a hole in table. 
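The splitRegion() helper above now drives the split through SplitTransactionImpl and reads region names via getRegionInfo(). A condensed sketch of the prepare/execute/rollback flow it exercises, assuming parent is an open HRegion and midkey a valid split point (the null arguments stand in for the Server and RegionServerServices a real region server would pass):

  SplitTransactionImpl st = new SplitTransactionImpl(parent, midkey);
  if (!st.prepare()) {
    return null;                                   // not ready to split; the reason is logged inside prepare()
  }
  try {
    PairOfSameType<Region> halves = st.execute(null, null);
    return new HRegion[] { (HRegion) halves.getFirst(), (HRegion) halves.getSecond() };
  } catch (IOException ioe) {
    st.rollback(null, null);                       // undo the half-finished split before giving up
    return null;
  } finally {
    parent.clearSplit();
  }
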
- LOG.info("Failed rollback of failed split of " + parent.getRegionNameAsString() - + " -- aborting server", e); + LOG.info("Failed rollback of failed split of " + + parent.getRegionInfo().getRegionNameAsString() + " -- aborting server", e); } } finally { parent.clearSplit(); } - return new HRegion[] { result.getFirst(), result.getSecond() }; + return new HRegion[] { (HRegion)result.getFirst(), (HRegion)result.getSecond() }; } // //////////////////////////////////////////////////////////////////////////// @@ -2808,7 +2812,7 @@ public class TestHRegion { put.add(kv22); put.add(kv21); region.put(put); - region.flushcache(); + region.flush(true); // Expected List expected = new ArrayList(); @@ -2870,19 +2874,19 @@ public class TestHRegion { put.add(kv14); put.add(kv24); region.put(put); - region.flushcache(); + region.flush(true); put = new Put(row1); put.add(kv23); put.add(kv13); region.put(put); - region.flushcache(); + region.flush(true); put = new Put(row1); put.add(kv22); put.add(kv12); region.put(put); - region.flushcache(); + region.flush(true); put = new Put(row1); put.add(kv21); @@ -3013,7 +3017,7 @@ public class TestHRegion { put.add(kv22); put.add(kv21); region.put(put); - region.flushcache(); + region.flush(true); // Expected List expected = new ArrayList(); @@ -3121,19 +3125,19 @@ public class TestHRegion { put.add(kv14); put.add(kv24); region.put(put); - region.flushcache(); + region.flush(true); put = new Put(row1); put.add(kv23); put.add(kv13); region.put(put); - region.flushcache(); + region.flush(true); put = new Put(row1); put.add(kv22); put.add(kv12); region.put(put); - region.flushcache(); + region.flush(true); put = new Put(row1); put.add(kv21); @@ -3312,8 +3316,9 @@ public class TestHRegion { List results = new ArrayList(); int index = 0; + ScannerContext scannerContext = ScannerContext.newBuilder().setBatchLimit(3).build(); while (true) { - boolean more = s.next(results, 3); + boolean more = s.next(results, scannerContext); if ((index >> 1) < 5) { if (index % 2 == 0) assertEquals(results.size(), 3); @@ -3351,7 +3356,7 @@ public class TestHRegion { try { LOG.info("" + HBaseTestCase.addContent(region, fam3)); - region.flushcache(); + region.flush(true); region.compactStores(); byte[] splitRow = region.checkSplit(); assertNotNull(splitRow); @@ -3380,7 +3385,7 @@ public class TestHRegion { } HBaseTestCase.addContent(regions[i], fam2); HBaseTestCase.addContent(regions[i], fam1); - regions[i].flushcache(); + regions[i].flush(true); } byte[][] midkeys = new byte[regions.length][]; @@ -3398,7 +3403,8 @@ public class TestHRegion { if (midkeys[i] != null) { rs = splitRegion(regions[i], midkeys[i]); for (int j = 0; j < rs.length; j++) { - sortedMap.put(Bytes.toString(rs[j].getRegionName()), HRegion.openHRegion(rs[j], null)); + sortedMap.put(Bytes.toString(rs[j].getRegionInfo().getRegionName()), + HRegion.openHRegion(rs[j], null)); } } } @@ -3442,7 +3448,7 @@ public class TestHRegion { putData(startRow, numRows, qualifier, families); int splitRow = startRow + numRows; putData(splitRow, numRows, qualifier, families); - region.flushcache(); + region.flush(true); HRegion[] regions = null; try { @@ -3482,7 +3488,7 @@ public class TestHRegion { int splitRow = startRow + numRows; byte[] splitRowBytes = Bytes.toBytes("" + splitRow); putData(splitRow, numRows, qualifier, families); - region.flushcache(); + region.flush(true); HRegion[] regions = null; try { @@ -3573,7 +3579,7 @@ public class TestHRegion { if (i != 0 && i % compactInterval == 0) { // System.out.println("iteration = " + i); 
- region.compactStores(true); + region.compact(true); } if (i % 10 == 5L) { @@ -3636,7 +3642,7 @@ public class TestHRegion { } } try { - region.flushcache(); + region.flush(true); } catch (IOException e) { if (!done) { LOG.error("Error while flusing cache", e); @@ -3701,7 +3707,7 @@ public class TestHRegion { for (int i = 0; i < testCount; i++) { if (i != 0 && i % compactInterval == 0) { - region.compactStores(true); + region.compact(true); } if (i != 0 && i % flushInterval == 0) { @@ -3724,7 +3730,7 @@ public class TestHRegion { putThread.done(); - region.flushcache(); + region.flush(true); putThread.join(); putThread.checkNoError(); @@ -3832,7 +3838,7 @@ public class TestHRegion { */ @Test public void testWritesWhileGetting() throws Exception { - int testCount = 100; + int testCount = 50; int numRows = 1; int numFamilies = 10; int numQualifiers = 100; @@ -3871,13 +3877,13 @@ public class TestHRegion { @Override public void doAnAction() throws Exception { - if (region.flushcache().isCompactionNeeded()) { + if (region.flush(true).isCompactionNeeded()) { ++flushesSinceCompact; } // Compact regularly to avoid creating too many files and exceeding // the ulimit. if (flushesSinceCompact == maxFlushesSinceCompact) { - region.compactStores(false); + region.compact(false); flushesSinceCompact = 0; } } @@ -3891,7 +3897,7 @@ public class TestHRegion { long prevTimestamp = 0L; for (int i = 0; i < testCount; i++) { - + LOG.info("testWritesWhileGetting verify turn " + i); boolean previousEmpty = result == null || result.isEmpty(); result = region.get(get); if (!result.isEmpty() || !previousEmpty || i > compactInterval) { @@ -3927,7 +3933,7 @@ public class TestHRegion { if (putThread != null) putThread.done(); - region.flushcache(); + region.flush(true); if (putThread != null) { putThread.join(); @@ -3975,7 +3981,7 @@ public class TestHRegion { put.add(family, qual1, 1L, Bytes.toBytes(1L)); region.put(put); - region.flushcache(); + region.flush(true); Delete delete = new Delete(Bytes.toBytes(1L), 1L); region.delete(delete); @@ -4033,7 +4039,7 @@ public class TestHRegion { region.put(put); } } - region.flushcache(); + region.flush(true); } // before compaction HStore store = (HStore) region.getStore(fam1); @@ -4046,7 +4052,7 @@ public class TestHRegion { assertEquals(num_unique_rows, reader.getFilterEntries()); } - region.compactStores(true); + region.compact(true); // after compaction storeFiles = store.getStorefiles(); @@ -4087,7 +4093,7 @@ public class TestHRegion { region.put(put); // Flush - region.flushcache(); + region.flush(true); // Get rows Get get = new Get(row); @@ -4131,11 +4137,11 @@ public class TestHRegion { Put put = new Put(row); put.add(familyName, col, 1, Bytes.toBytes("SomeRandomValue")); region.put(put); - region.flushcache(); + region.flush(true); Delete del = new Delete(row); region.delete(del); - region.flushcache(); + region.flush(true); // Get remaining rows (should have none) Get get = new Get(row); @@ -4182,7 +4188,7 @@ public class TestHRegion { HRegion firstRegion = htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName())) .get(0); - firstRegion.flushcache(); + firstRegion.flush(true); HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution(); // Given the default replication factor is 2 and we have 2 HFiles, @@ -4355,7 +4361,7 @@ public class TestHRegion { public void run() { while (!incrementDone.get()) { try { - region.flushcache(); + region.flush(true); } catch (Exception e) { e.printStackTrace(); } @@ -4442,7 +4448,7 @@ public 
class TestHRegion { public void run() { while (!appendDone.get()) { try { - region.flushcache(); + region.flush(true); } catch (Exception e) { e.printStackTrace(); } @@ -4516,7 +4522,7 @@ public class TestHRegion { assertEquals(1, kvs.size()); assertArrayEquals(Bytes.toBytes("value0"), CellUtil.cloneValue(kvs.get(0))); - region.flushcache(); + region.flush(true); get = new Get(row); get.addColumn(family, qualifier); get.setMaxVersions(); @@ -4537,7 +4543,7 @@ public class TestHRegion { assertEquals(1, kvs.size()); assertArrayEquals(Bytes.toBytes("value1"), CellUtil.cloneValue(kvs.get(0))); - region.flushcache(); + region.flush(true); get = new Get(row); get.addColumn(family, qualifier); get.setMaxVersions(); @@ -4655,7 +4661,7 @@ public class TestHRegion { // create a primary region, load some data and flush // create a secondary region, and do a get against that Path rootDir = new Path(dir + "testRegionReplicaSecondary"); - FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir); + FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir); byte[][] families = new byte[][] { Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3") @@ -4684,7 +4690,7 @@ public class TestHRegion { putData(primaryRegion, 0, 1000, cq, families); // flush region - primaryRegion.flushcache(); + primaryRegion.flush(true); // open secondary region secondaryRegion = HRegion.openHRegion(rootDir, secondaryHri, htd, null, CONF); @@ -4734,7 +4740,7 @@ public class TestHRegion { putData(primaryRegion, 0, 1000, cq, families); // flush region - primaryRegion.flushcache(); + primaryRegion.flush(true); // open secondary region secondaryRegion = HRegion.openHRegion(rootDir, secondaryHri, htd, null, CONF); @@ -4755,6 +4761,14 @@ public class TestHRegion { } } + static WALFactory createWALFactory(Configuration conf, Path rootDir) throws IOException { + Configuration confForWAL = new Configuration(conf); + confForWAL.set(HConstants.HBASE_DIR, rootDir.toString()); + return new WALFactory(confForWAL, + Collections.singletonList(new MetricsWAL()), + "hregion-" + RandomStringUtils.randomNumeric(8)); + } + @Test public void testCompactionFromPrimary() throws IOException { Path rootDir = new Path(dir + "testRegionReplicaSecondary"); @@ -4787,7 +4801,7 @@ public class TestHRegion { putData(primaryRegion, 0, 1000, cq, families); // flush region - primaryRegion.flushcache(); + primaryRegion.flush(true); // open secondary region secondaryRegion = HRegion.openHRegion(rootDir, secondaryHri, htd, null, CONF); @@ -4815,9 +4829,14 @@ public class TestHRegion { private void putData(HRegion region, int startRow, int numRows, byte[] qf, byte[]... families) throws IOException { + putData(region, Durability.SKIP_WAL, startRow, numRows, qf, families); + } + + static void putData(HRegion region, Durability durability, + int startRow, int numRows, byte[] qf, byte[]... families) throws IOException { for (int i = startRow; i < startRow + numRows; i++) { Put put = new Put(Bytes.toBytes("" + i)); - put.setDurability(Durability.SKIP_WAL); + put.setDurability(durability); for (byte[] family : families) { put.add(family, qf, null); } @@ -4825,7 +4844,7 @@ public class TestHRegion { } } - private void verifyData(HRegion newReg, int startRow, int numRows, byte[] qf, byte[]... families) + static void verifyData(HRegion newReg, int startRow, int numRows, byte[] qf, byte[]... 
families) throws IOException { for (int i = startRow; i < startRow + numRows; i++) { byte[] row = Bytes.toBytes("" + i); @@ -4844,7 +4863,7 @@ public class TestHRegion { } } - private void assertGet(final HRegion r, final byte[] family, final byte[] k) throws IOException { + static void assertGet(final HRegion r, final byte[] family, final byte[] k) throws IOException { // Now I have k, get values out and assert they are as expected. Get get = new Get(k).addFamily(family).setMaxVersions(); Cell[] results = r.get(get).rawCells(); @@ -4907,7 +4926,7 @@ public class TestHRegion { this.region = initHRegion(tableName, method, family); // empty memstore, flush doesn't run - HRegion.FlushResult fr = region.flushcache(); + HRegion.FlushResult fr = region.flush(true); assertFalse(fr.isFlushSucceeded()); assertFalse(fr.isCompactionNeeded()); @@ -4915,7 +4934,7 @@ public class TestHRegion { for (int i = 0; i < 2; i++) { Put put = new Put(tableName).add(family, family, tableName); region.put(put); - fr = region.flushcache(); + fr = region.flush(true); assertTrue(fr.isFlushSucceeded()); assertFalse(fr.isCompactionNeeded()); } @@ -4924,7 +4943,7 @@ public class TestHRegion { for (int i = 0; i < 2; i++) { Put put = new Put(tableName).add(family, family, tableName); region.put(put); - fr = region.flushcache(); + fr = region.flush(true); assertTrue(fr.isFlushSucceeded()); assertTrue(fr.isCompactionNeeded()); } @@ -4991,7 +5010,7 @@ public class TestHRegion { return initHRegion(tableName, null, null, callingMethod, conf, isReadOnly, families); } - private static HRegion initHRegion(byte[] tableName, byte[] startKey, byte[] stopKey, + public static HRegion initHRegion(byte[] tableName, byte[] startKey, byte[] stopKey, String callingMethod, Configuration conf, boolean isReadOnly, byte[]... families) throws IOException { Path logDir = TEST_UTIL.getDataTestDirOnTestFS(callingMethod + ".log"); @@ -5013,7 +5032,7 @@ public class TestHRegion { * @return A region on which you must call * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when done. */ - private static HRegion initHRegion(byte[] tableName, byte[] startKey, byte[] stopKey, + public static HRegion initHRegion(byte[] tableName, byte[] startKey, byte[] stopKey, String callingMethod, Configuration conf, boolean isReadOnly, Durability durability, WAL wal, byte[]... 
families) throws IOException { return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, callingMethod, conf, @@ -5245,7 +5264,7 @@ public class TestHRegion { put = new Put(rowE); put.add(kv5); region.put(put); - region.flushcache(); + region.flush(true); Scan scan = new Scan(rowD, rowA); scan.addColumn(families[0], col1); scan.setReversed(true); @@ -5325,7 +5344,7 @@ public class TestHRegion { put = new Put(rowE); put.add(kv5); region.put(put); - region.flushcache(); + region.flush(true); Scan scan = new Scan(rowD, rowA); scan.addColumn(families[0], col1); scan.setReversed(true); @@ -5428,7 +5447,7 @@ public class TestHRegion { put.add(kv4_5_4); put.add(kv4_5_5); region.put(put); - region.flushcache(); + region.flush(true); // hfiles(cf1/cf3) : "row1" (1 kvs) / "row2" (1 kv) / "row4" (2 kv) put = new Put(row4); put.add(kv4_5_1); @@ -5440,7 +5459,7 @@ public class TestHRegion { put = new Put(row2); put.add(kv2_4_4); region.put(put); - region.flushcache(); + region.flush(true); // hfiles(cf1/cf3) : "row2"(2 kv) / "row3"(1 kvs) / "row4" (1 kv) put = new Put(row4); put.add(kv4_5_2); @@ -5452,7 +5471,7 @@ public class TestHRegion { put = new Put(row3); put.add(kv3_2_2); region.put(put); - region.flushcache(); + region.flush(true); // memstore(cf1/cf2/cf3) : "row0" (1 kvs) / "row3" ( 1 kv) / "row5" (max) // ( 2 kv) put = new Put(row0); @@ -5551,17 +5570,17 @@ public class TestHRegion { Put put = new Put(row1); put.add(kv1); region.put(put); - region.flushcache(); + region.flush(true); // storefile2 put = new Put(row2); put.add(kv2); region.put(put); - region.flushcache(); + region.flush(true); // storefile3 put = new Put(row3); put.add(kv3); region.put(put); - region.flushcache(); + region.flush(true); // memstore put = new Put(row4); put.add(kv4); @@ -5615,7 +5634,7 @@ public class TestHRegion { int splitRow = startRow + numRows; putData(splitRow, numRows, qualifier, families); int endRow = splitRow + numRows; - region.flushcache(); + region.flush(true); HRegion [] regions = null; try { @@ -5740,7 +5759,7 @@ public class TestHRegion { // create a file in fam1 for the region before opening in OpenRegionHandler region.put(new Put(Bytes.toBytes("a")).add(fam1, fam1, fam1)); - region.flushcache(); + region.flush(true); HBaseTestingUtility.closeRegionAndWAL(region); ArgumentCaptor editCaptor = ArgumentCaptor.forClass(WALEdit.class); @@ -5789,6 +5808,109 @@ public class TestHRegion { } } + // Helper for test testOpenRegionWrittenToWALForLogReplay + static class HRegionWithSeqId extends HRegion { + public HRegionWithSeqId(final Path tableDir, final WAL wal, final FileSystem fs, + final Configuration confParam, final HRegionInfo regionInfo, + final HTableDescriptor htd, final RegionServerServices rsServices) { + super(tableDir, wal, fs, confParam, regionInfo, htd, rsServices); + } + @Override + protected long getNextSequenceId(WAL wal) throws IOException { + return 42; + } + } + + @Test + @SuppressWarnings("unchecked") + public void testOpenRegionWrittenToWALForLogReplay() throws Exception { + // similar to the above test but with distributed log replay + final ServerName serverName = ServerName.valueOf("testOpenRegionWrittenToWALForLogReplay", + 100, 42); + final RegionServerServices rss = spy(TEST_UTIL.createMockRegionServerService(serverName)); + + HTableDescriptor htd + = new HTableDescriptor(TableName.valueOf("testOpenRegionWrittenToWALForLogReplay")); + htd.addFamily(new HColumnDescriptor(fam1)); + htd.addFamily(new HColumnDescriptor(fam2)); + + HRegionInfo hri = new 
HRegionInfo(htd.getTableName(), + HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY); + + // open the region w/o rss and wal and flush some files + HRegion region = + HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL + .getConfiguration(), htd); + assertNotNull(region); + + // create a file in fam1 for the region before opening in OpenRegionHandler + region.put(new Put(Bytes.toBytes("a")).add(fam1, fam1, fam1)); + region.flush(true); + HBaseTestingUtility.closeRegionAndWAL(region); + + ArgumentCaptor editCaptor = ArgumentCaptor.forClass(WALEdit.class); + + // capture append() calls + WAL wal = mock(WAL.class); + when(rss.getWAL((HRegionInfo) any())).thenReturn(wal); + + // add the region to recovering regions + HashMap recoveringRegions = Maps.newHashMap(); + recoveringRegions.put(region.getRegionInfo().getEncodedName(), null); + when(rss.getRecoveringRegions()).thenReturn(recoveringRegions); + + try { + Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); + conf.set(HConstants.REGION_IMPL, HRegionWithSeqId.class.getName()); + region = HRegion.openHRegion(hri, htd, rss.getWAL(hri), + conf, rss, null); + + // verify that we have not appended region open event to WAL because this region is still + // recovering + verify(wal, times(0)).append((HTableDescriptor)any(), (HRegionInfo)any(), (WALKey)any() + , editCaptor.capture(), (AtomicLong)any(), anyBoolean(), (List)any()); + + // not put the region out of recovering state + new FinishRegionRecoveringHandler(rss, region.getRegionInfo().getEncodedName(), "/foo") + .prepare().process(); + + // now we should have put the entry + verify(wal, times(1)).append((HTableDescriptor)any(), (HRegionInfo)any(), (WALKey)any() + , editCaptor.capture(), (AtomicLong)any(), anyBoolean(), (List)any()); + + WALEdit edit = editCaptor.getValue(); + assertNotNull(edit); + assertNotNull(edit.getCells()); + assertEquals(1, edit.getCells().size()); + RegionEventDescriptor desc = WALEdit.getRegionEventDescriptor(edit.getCells().get(0)); + assertNotNull(desc); + + LOG.info("RegionEventDescriptor from WAL: " + desc); + + assertEquals(RegionEventDescriptor.EventType.REGION_OPEN, desc.getEventType()); + assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getName())); + assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(), + hri.getEncodedNameAsBytes())); + assertTrue(desc.getLogSequenceNumber() > 0); + assertEquals(serverName, ProtobufUtil.toServerName(desc.getServer())); + assertEquals(2, desc.getStoresCount()); + + StoreDescriptor store = desc.getStores(0); + assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam1)); + assertEquals(store.getStoreHomeDir(), Bytes.toString(fam1)); + assertEquals(1, store.getStoreFileCount()); // 1store file + assertFalse(store.getStoreFile(0).contains("/")); // ensure path is relative + + store = desc.getStores(1); + assertTrue(Bytes.equals(store.getFamilyName().toByteArray(), fam2)); + assertEquals(store.getStoreHomeDir(), Bytes.toString(fam2)); + assertEquals(0, store.getStoreFileCount()); // no store files + + } finally { + HBaseTestingUtility.closeRegionAndWAL(region); + } + } + @Test @SuppressWarnings("unchecked") public void testCloseRegionWrittenToWAL() throws Exception { @@ -5942,7 +6064,7 @@ public class TestHRegion { region.put(new Put(row).add(fam1, q4, now + 10000 - 1, HConstants.EMPTY_BYTE_ARRAY)); // Flush so we are sure store scanning gets this right - region.flushcache(); + region.flush(true); // A query at time T+0 should return all 
cells Result r = region.get(new Get(row)); @@ -6028,7 +6150,7 @@ public class TestHRegion { } } - private static HRegion initHRegion(byte[] tableName, String callingMethod, + static HRegion initHRegion(byte[] tableName, String callingMethod, byte[]... families) throws IOException { return initHRegion(tableName, callingMethod, HBaseConfiguration.create(), families); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java new file mode 100644 index 00000000000..1ced6278bfd --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java @@ -0,0 +1,1576 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import static org.junit.Assert.*; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.apache.hadoop.hbase.regionserver.TestHRegion.*; + +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.io.hfile.HFileContext; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor; +import 
org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction; +import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor; +import org.apache.hadoop.hbase.regionserver.HRegion.FlushResultImpl; +import org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult; +import org.apache.hadoop.hbase.regionserver.compactions.NoLimitCompactionThroughputController; +import org.apache.hadoop.hbase.regionserver.wal.WALEdit; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.wal.DefaultWALProvider; +import org.apache.hadoop.hbase.wal.WAL; +import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALKey; +import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay; +import org.apache.hadoop.util.StringUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TemporaryFolder; +import org.junit.rules.TestName; + +import com.google.common.collect.Lists; +import com.google.protobuf.ByteString; + +/** + * Tests of HRegion methods for replaying flush, compaction, region open, etc events for secondary + * region replicas + */ +@Category(MediumTests.class) +public class TestHRegionReplayEvents { + + static final Log LOG = LogFactory.getLog(TestHRegion.class); + @Rule public TestName name = new TestName(); + + private static HBaseTestingUtility TEST_UTIL; + + public static Configuration CONF ; + private String dir; + private static FileSystem FILESYSTEM; + + private byte[][] families = new byte[][] { + Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")}; + + // Test names + protected byte[] tableName; + protected String method; + protected final byte[] row = Bytes.toBytes("rowA"); + protected final byte[] row2 = Bytes.toBytes("rowB"); + protected byte[] cq = Bytes.toBytes("cq"); + + // per test fields + private Path rootDir; + private HTableDescriptor htd; + private long time; + private RegionServerServices rss; + private HRegionInfo primaryHri, secondaryHri; + private HRegion primaryRegion, secondaryRegion; + private WALFactory wals; + private WAL walPrimary, walSecondary; + private WAL.Reader reader; + + @Before + public void setup() throws IOException { + TEST_UTIL = HBaseTestingUtility.createLocalHTU(); + FILESYSTEM = TEST_UTIL.getTestFileSystem(); + CONF = TEST_UTIL.getConfiguration(); + dir = TEST_UTIL.getDataTestDir("TestHRegionReplayEvents").toString(); + method = name.getMethodName(); + tableName = Bytes.toBytes(name.getMethodName()); + rootDir = new Path(dir + method); + TEST_UTIL.getConfiguration().set(HConstants.HBASE_DIR, rootDir.toString()); + method = name.getMethodName(); + + htd = new HTableDescriptor(TableName.valueOf(method)); + for (byte[] family : families) { + htd.addFamily(new HColumnDescriptor(family)); + } + + time = System.currentTimeMillis(); + + primaryHri = new HRegionInfo(htd.getTableName(), + HConstants.EMPTY_START_ROW, 
HConstants.EMPTY_END_ROW, + false, time, 0); + secondaryHri = new HRegionInfo(htd.getTableName(), + HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, + false, time, 1); + + wals = TestHRegion.createWALFactory(CONF, rootDir); + walPrimary = wals.getWAL(primaryHri.getEncodedNameAsBytes()); + walSecondary = wals.getWAL(secondaryHri.getEncodedNameAsBytes()); + + rss = mock(RegionServerServices.class); + when(rss.getServerName()).thenReturn(ServerName.valueOf("foo", 1, 1)); + when(rss.getConfiguration()).thenReturn(CONF); + when(rss.getRegionServerAccounting()).thenReturn(new RegionServerAccounting()); + + primaryRegion = HRegion.createHRegion(primaryHri, rootDir, CONF, htd, walPrimary); + primaryRegion.close(); + + primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null); + secondaryRegion = HRegion.openHRegion(secondaryHri, htd, null, CONF, rss, null); + + reader = null; + } + + @After + public void tearDown() throws Exception { + if (reader != null) { + reader.close(); + } + + if (primaryRegion != null) { + HBaseTestingUtility.closeRegionAndWAL(primaryRegion); + } + if (secondaryRegion != null) { + HBaseTestingUtility.closeRegionAndWAL(secondaryRegion); + } + + EnvironmentEdgeManagerTestHelper.reset(); + LOG.info("Cleaning test directory: " + TEST_UTIL.getDataTestDir()); + TEST_UTIL.cleanupTestDir(); + } + + String getName() { + return name.getMethodName(); + } + + // Some of the test cases are as follows: + // 1. replay flush start marker again + // 2. replay flush with smaller seqId than what is there in memstore snapshot + // 3. replay flush with larger seqId than what is there in memstore snapshot + // 4. replay flush commit without flush prepare. non droppable memstore + // 5. replay flush commit without flush prepare. droppable memstore + // 6. replay open region event + // 7. replay open region event after flush start + // 8. replay flush form an earlier seqId (test ignoring seqIds) + // 9. start flush does not prevent region from closing. + + @Test + public void testRegionReplicaSecondaryCannotFlush() throws IOException { + // load some data and flush ensure that the secondary replica will not execute the flush + + // load some data to secondary by replaying + putDataByReplay(secondaryRegion, 0, 1000, cq, families); + + verifyData(secondaryRegion, 0, 1000, cq, families); + + // flush region + FlushResultImpl flush = (FlushResultImpl)secondaryRegion.flush(true); + assertEquals(flush.result, FlushResultImpl.Result.CANNOT_FLUSH); + + verifyData(secondaryRegion, 0, 1000, cq, families); + + // close the region, and inspect that it has not flushed + Map> files = secondaryRegion.close(false); + // assert that there are no files (due to flush) + for (List f : files.values()) { + assertTrue(f.isEmpty()); + } + } + + /** + * Tests a case where we replay only a flush start marker, then the region is closed. 
This region + * should not block indefinitely + */ + @Test (timeout = 60000) + public void testOnlyReplayingFlushStartDoesNotHoldUpRegionClose() throws IOException { + // load some data to primary and flush + int start = 0; + LOG.info("-- Writing some data to primary from " + start + " to " + (start+100)); + putData(primaryRegion, Durability.SYNC_WAL, start, 100, cq, families); + LOG.info("-- Flushing primary, creating 3 files for 3 stores"); + primaryRegion.flush(true); + + // now replay the edits and the flush marker + reader = createWALReaderForPrimary(); + + LOG.info("-- Replaying edits and flush events in secondary"); + while (true) { + WAL.Entry entry = reader.next(); + if (entry == null) { + break; + } + FlushDescriptor flushDesc + = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + if (flushDesc != null) { + if (flushDesc.getAction() == FlushAction.START_FLUSH) { + LOG.info("-- Replaying flush start in secondary"); + PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(flushDesc); + } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) { + LOG.info("-- NOT Replaying flush commit in secondary"); + } + } else { + replayEdit(secondaryRegion, entry); + } + } + + assertTrue(rss.getRegionServerAccounting().getGlobalMemstoreSize() > 0); + // now close the region which should not cause hold because of un-committed flush + secondaryRegion.close(); + + // verify that the memstore size is back to what it was + assertEquals(0, rss.getRegionServerAccounting().getGlobalMemstoreSize()); + } + + static int replayEdit(HRegion region, WAL.Entry entry) throws IOException { + if (WALEdit.isMetaEditFamily(entry.getEdit().getCells().get(0))) { + return 0; // handled elsewhere + } + Put put = new Put(entry.getEdit().getCells().get(0).getRow()); + for (Cell cell : entry.getEdit().getCells()) put.add(cell); + put.setDurability(Durability.SKIP_WAL); + MutationReplay mutation = new MutationReplay(MutationType.PUT, put, 0, 0); + region.batchReplay(new MutationReplay[] {mutation}, + entry.getKey().getLogSeqNum()); + return Integer.parseInt(Bytes.toString(put.getRow())); + } + + WAL.Reader createWALReaderForPrimary() throws FileNotFoundException, IOException { + return wals.createReader(TEST_UTIL.getTestFileSystem(), + DefaultWALProvider.getCurrentFileName(walPrimary), + TEST_UTIL.getConfiguration()); + } + + @Test + public void testReplayFlushesAndCompactions() throws IOException { + // initiate a secondary region with some data. + + // load some data to primary and flush. 
3 flushes and some more unflushed data + putDataWithFlushes(primaryRegion, 100, 300, 100); + + // compaction from primary + LOG.info("-- Compacting primary, only 1 store"); + primaryRegion.compactStore(Bytes.toBytes("cf1"), + NoLimitCompactionThroughputController.INSTANCE); + + // now replay the edits and the flush marker + reader = createWALReaderForPrimary(); + + LOG.info("-- Replaying edits and flush events in secondary"); + int lastReplayed = 0; + int expectedStoreFileCount = 0; + while (true) { + WAL.Entry entry = reader.next(); + if (entry == null) { + break; + } + FlushDescriptor flushDesc + = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + CompactionDescriptor compactionDesc + = WALEdit.getCompaction(entry.getEdit().getCells().get(0)); + if (flushDesc != null) { + // first verify that everything is replayed and visible before flush event replay + verifyData(secondaryRegion, 0, lastReplayed, cq, families); + Store store = secondaryRegion.getStore(Bytes.toBytes("cf1")); + long storeMemstoreSize = store.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemstoreSize(); + long storeFlushableSize = store.getFlushableSize(); + long storeSize = store.getSize(); + long storeSizeUncompressed = store.getStoreSizeUncompressed(); + if (flushDesc.getAction() == FlushAction.START_FLUSH) { + LOG.info("-- Replaying flush start in secondary"); + PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(flushDesc); + assertNull(result.result); + assertEquals(result.flushOpSeqId, flushDesc.getFlushSequenceNumber()); + + // assert that the store memstore is smaller now + long newStoreMemstoreSize = store.getMemStoreSize(); + LOG.info("Memstore size reduced by:" + + StringUtils.humanReadableInt(newStoreMemstoreSize - storeMemstoreSize)); + assertTrue(storeMemstoreSize > newStoreMemstoreSize); + + } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) { + LOG.info("-- Replaying flush commit in secondary"); + secondaryRegion.replayWALFlushCommitMarker(flushDesc); + + // assert that the flush files are picked + expectedStoreFileCount++; + for (Store s : secondaryRegion.getStores()) { + assertEquals(expectedStoreFileCount, s.getStorefilesCount()); + } + long newFlushableSize = store.getFlushableSize(); + assertTrue(storeFlushableSize > newFlushableSize); + + // assert that the region memstore is smaller now + long newRegionMemstoreSize = secondaryRegion.getMemstoreSize(); + assertTrue(regionMemstoreSize > newRegionMemstoreSize); + + // assert that the store sizes are bigger + assertTrue(store.getSize() > storeSize); + assertTrue(store.getStoreSizeUncompressed() > storeSizeUncompressed); + assertEquals(store.getSize(), store.getStorefilesSize()); + } + // after replay verify that everything is still visible + verifyData(secondaryRegion, 0, lastReplayed+1, cq, families); + } else if (compactionDesc != null) { + secondaryRegion.replayWALCompactionMarker(compactionDesc, true, false, Long.MAX_VALUE); + + // assert that the compaction is applied + for (Store store : secondaryRegion.getStores()) { + if (store.getColumnFamilyName().equals("cf1")) { + assertEquals(1, store.getStorefilesCount()); + } else { + assertEquals(expectedStoreFileCount, store.getStorefilesCount()); + } + } + } else { + lastReplayed = replayEdit(secondaryRegion, entry);; + } + } + + assertEquals(400-1, lastReplayed); + LOG.info("-- Verifying edits from secondary"); + verifyData(secondaryRegion, 0, 400, cq, families); + + LOG.info("-- Verifying edits from primary. 
Ensuring that files are not deleted"); + verifyData(primaryRegion, 0, lastReplayed, cq, families); + for (Store store : primaryRegion.getStores()) { + if (store.getColumnFamilyName().equals("cf1")) { + assertEquals(1, store.getStorefilesCount()); + } else { + assertEquals(expectedStoreFileCount, store.getStorefilesCount()); + } + } + } + + /** + * Tests cases where we prepare a flush with some seqId and we receive other flush start markers + * equal to, greater or less than the previous flush start marker. + */ + @Test + public void testReplayFlushStartMarkers() throws IOException { + // load some data to primary and flush. 1 flush and some more unflushed data + putDataWithFlushes(primaryRegion, 100, 100, 100); + int numRows = 200; + + // now replay the edits and the flush marker + reader = createWALReaderForPrimary(); + + LOG.info("-- Replaying edits and flush events in secondary"); + + FlushDescriptor startFlushDesc = null; + + int lastReplayed = 0; + while (true) { + WAL.Entry entry = reader.next(); + if (entry == null) { + break; + } + FlushDescriptor flushDesc + = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + if (flushDesc != null) { + // first verify that everything is replayed and visible before flush event replay + Store store = secondaryRegion.getStore(Bytes.toBytes("cf1")); + long storeMemstoreSize = store.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemstoreSize(); + long storeFlushableSize = store.getFlushableSize(); + + if (flushDesc.getAction() == FlushAction.START_FLUSH) { + startFlushDesc = flushDesc; + LOG.info("-- Replaying flush start in secondary"); + PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(startFlushDesc); + assertNull(result.result); + assertEquals(result.flushOpSeqId, startFlushDesc.getFlushSequenceNumber()); + assertTrue(regionMemstoreSize > 0); + assertTrue(storeFlushableSize > 0); + + // assert that the store memstore is smaller now + long newStoreMemstoreSize = store.getMemStoreSize(); + LOG.info("Memstore size reduced by:" + + StringUtils.humanReadableInt(newStoreMemstoreSize - storeMemstoreSize)); + assertTrue(storeMemstoreSize > newStoreMemstoreSize); + verifyData(secondaryRegion, 0, lastReplayed+1, cq, families); + + } + // after replay verify that everything is still visible + verifyData(secondaryRegion, 0, lastReplayed+1, cq, families); + } else { + lastReplayed = replayEdit(secondaryRegion, entry); + } + } + + // at this point, there should be some data (rows 0-100) in memstore snapshot + // and some more data in memstores (rows 100-200) + + verifyData(secondaryRegion, 0, numRows, cq, families); + + // Test case 1: replay the same flush start marker again + LOG.info("-- Replaying same flush start in secondary again"); + PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(startFlushDesc); + assertNull(result); // this should return null. Ignoring the flush start marker + // assert that we still have prepared flush with the previous setup. 
+ assertNotNull(secondaryRegion.getPrepareFlushResult()); + assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, + startFlushDesc.getFlushSequenceNumber()); + assertTrue(secondaryRegion.getMemstoreSize() > 0); // memstore is not empty + verifyData(secondaryRegion, 0, numRows, cq, families); + + // Test case 2: replay a flush start marker with a smaller seqId + FlushDescriptor startFlushDescSmallerSeqId + = clone(startFlushDesc, startFlushDesc.getFlushSequenceNumber() - 50); + LOG.info("-- Replaying same flush start in secondary again " + startFlushDescSmallerSeqId); + result = secondaryRegion.replayWALFlushStartMarker(startFlushDescSmallerSeqId); + assertNull(result); // this should return null. Ignoring the flush start marker + // assert that we still have prepared flush with the previous setup. + assertNotNull(secondaryRegion.getPrepareFlushResult()); + assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, + startFlushDesc.getFlushSequenceNumber()); + assertTrue(secondaryRegion.getMemstoreSize() > 0); // memstore is not empty + verifyData(secondaryRegion, 0, numRows, cq, families); + + // Test case 3: replay a flush start marker with a larger seqId + FlushDescriptor startFlushDescLargerSeqId + = clone(startFlushDesc, startFlushDesc.getFlushSequenceNumber() + 50); + LOG.info("-- Replaying same flush start in secondary again " + startFlushDescLargerSeqId); + result = secondaryRegion.replayWALFlushStartMarker(startFlushDescLargerSeqId); + assertNull(result); // this should return null. Ignoring the flush start marker + // assert that we still have prepared flush with the previous setup. + assertNotNull(secondaryRegion.getPrepareFlushResult()); + assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, + startFlushDesc.getFlushSequenceNumber()); + assertTrue(secondaryRegion.getMemstoreSize() > 0); // memstore is not empty + verifyData(secondaryRegion, 0, numRows, cq, families); + + LOG.info("-- Verifying edits from secondary"); + verifyData(secondaryRegion, 0, numRows, cq, families); + + LOG.info("-- Verifying edits from primary."); + verifyData(primaryRegion, 0, numRows, cq, families); + } + + /** + * Tests the case where we prepare a flush with some seqId and we receive a flush commit marker + * less than the previous flush start marker. + */ + @Test + public void testReplayFlushCommitMarkerSmallerThanFlushStartMarker() throws IOException { + // load some data to primary and flush. 
2 flushes and some more unflushed data + putDataWithFlushes(primaryRegion, 100, 200, 100); + int numRows = 300; + + // now replay the edits and the flush marker + reader = createWALReaderForPrimary(); + + LOG.info("-- Replaying edits and flush events in secondary"); + FlushDescriptor startFlushDesc = null; + FlushDescriptor commitFlushDesc = null; + + int lastReplayed = 0; + while (true) { + System.out.println(lastReplayed); + WAL.Entry entry = reader.next(); + if (entry == null) { + break; + } + FlushDescriptor flushDesc + = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + if (flushDesc != null) { + if (flushDesc.getAction() == FlushAction.START_FLUSH) { + // don't replay the first flush start marker, hold on to it, replay the second one + if (startFlushDesc == null) { + startFlushDesc = flushDesc; + } else { + LOG.info("-- Replaying flush start in secondary"); + startFlushDesc = flushDesc; + PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(startFlushDesc); + assertNull(result.result); + } + } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) { + // do not replay any flush commit yet + if (commitFlushDesc == null) { + commitFlushDesc = flushDesc; // hold on to the first flush commit marker + } + } + // after replay verify that everything is still visible + verifyData(secondaryRegion, 0, lastReplayed+1, cq, families); + } else { + lastReplayed = replayEdit(secondaryRegion, entry); + } + } + + // at this point, there should be some data (rows 0-200) in memstore snapshot + // and some more data in memstores (rows 200-300) + verifyData(secondaryRegion, 0, numRows, cq, families); + + // no store files in the region + int expectedStoreFileCount = 0; + for (Store s : secondaryRegion.getStores()) { + assertEquals(expectedStoreFileCount, s.getStorefilesCount()); + } + long regionMemstoreSize = secondaryRegion.getMemstoreSize(); + + // Test case 1: replay the a flush commit marker smaller than what we have prepared + LOG.info("Testing replaying flush COMMIT " + commitFlushDesc + " on top of flush START" + + startFlushDesc); + assertTrue(commitFlushDesc.getFlushSequenceNumber() < startFlushDesc.getFlushSequenceNumber()); + + LOG.info("-- Replaying flush commit in secondary" + commitFlushDesc); + secondaryRegion.replayWALFlushCommitMarker(commitFlushDesc); + + // assert that the flush files are picked + expectedStoreFileCount++; + for (Store s : secondaryRegion.getStores()) { + assertEquals(expectedStoreFileCount, s.getStorefilesCount()); + } + Store store = secondaryRegion.getStore(Bytes.toBytes("cf1")); + long newFlushableSize = store.getFlushableSize(); + assertTrue(newFlushableSize > 0); // assert that the memstore is not dropped + + // assert that the region memstore is same as before + long newRegionMemstoreSize = secondaryRegion.getMemstoreSize(); + assertEquals(regionMemstoreSize, newRegionMemstoreSize); + + assertNotNull(secondaryRegion.getPrepareFlushResult()); // not dropped + + LOG.info("-- Verifying edits from secondary"); + verifyData(secondaryRegion, 0, numRows, cq, families); + + LOG.info("-- Verifying edits from primary."); + verifyData(primaryRegion, 0, numRows, cq, families); + } + + /** + * Tests the case where we prepare a flush with some seqId and we receive a flush commit marker + * larger than the previous flush start marker. + */ + @Test + public void testReplayFlushCommitMarkerLargerThanFlushStartMarker() throws IOException { + // load some data to primary and flush. 
1 flush and some more unflushed data + putDataWithFlushes(primaryRegion, 100, 100, 100); + int numRows = 200; + + // now replay the edits and the flush marker + reader = createWALReaderForPrimary(); + + LOG.info("-- Replaying edits and flush events in secondary"); + FlushDescriptor startFlushDesc = null; + FlushDescriptor commitFlushDesc = null; + + int lastReplayed = 0; + while (true) { + WAL.Entry entry = reader.next(); + if (entry == null) { + break; + } + FlushDescriptor flushDesc + = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + if (flushDesc != null) { + if (flushDesc.getAction() == FlushAction.START_FLUSH) { + if (startFlushDesc == null) { + LOG.info("-- Replaying flush start in secondary"); + startFlushDesc = flushDesc; + PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(startFlushDesc); + assertNull(result.result); + } + } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) { + // do not replay any flush commit yet + // hold on to the flush commit marker but simulate a larger + // flush commit seqId + commitFlushDesc = + FlushDescriptor.newBuilder(flushDesc) + .setFlushSequenceNumber(flushDesc.getFlushSequenceNumber() + 50) + .build(); + } + // after replay verify that everything is still visible + verifyData(secondaryRegion, 0, lastReplayed+1, cq, families); + } else { + lastReplayed = replayEdit(secondaryRegion, entry); + } + } + + // at this point, there should be some data (rows 0-100) in memstore snapshot + // and some more data in memstores (rows 100-200) + verifyData(secondaryRegion, 0, numRows, cq, families); + + // no store files in the region + int expectedStoreFileCount = 0; + for (Store s : secondaryRegion.getStores()) { + assertEquals(expectedStoreFileCount, s.getStorefilesCount()); + } + long regionMemstoreSize = secondaryRegion.getMemstoreSize(); + + // Test case 1: replay the a flush commit marker larger than what we have prepared + LOG.info("Testing replaying flush COMMIT " + commitFlushDesc + " on top of flush START" + + startFlushDesc); + assertTrue(commitFlushDesc.getFlushSequenceNumber() > startFlushDesc.getFlushSequenceNumber()); + + LOG.info("-- Replaying flush commit in secondary" + commitFlushDesc); + secondaryRegion.replayWALFlushCommitMarker(commitFlushDesc); + + // assert that the flush files are picked + expectedStoreFileCount++; + for (Store s : secondaryRegion.getStores()) { + assertEquals(expectedStoreFileCount, s.getStorefilesCount()); + } + Store store = secondaryRegion.getStore(Bytes.toBytes("cf1")); + long newFlushableSize = store.getFlushableSize(); + assertTrue(newFlushableSize > 0); // assert that the memstore is not dropped + + // assert that the region memstore is smaller than before, but not empty + long newRegionMemstoreSize = secondaryRegion.getMemstoreSize(); + assertTrue(newRegionMemstoreSize > 0); + assertTrue(regionMemstoreSize > newRegionMemstoreSize); + + assertNull(secondaryRegion.getPrepareFlushResult()); // prepare snapshot should be dropped + + LOG.info("-- Verifying edits from secondary"); + verifyData(secondaryRegion, 0, numRows, cq, families); + + LOG.info("-- Verifying edits from primary."); + verifyData(primaryRegion, 0, numRows, cq, families); + } + + /** + * Tests the case where we receive a flush commit before receiving any flush prepare markers. 
+ * The memstore edits should be dropped after the flush commit replay since they should be in + * flushed files + */ + @Test + public void testReplayFlushCommitMarkerWithoutFlushStartMarkerDroppableMemstore() + throws IOException { + testReplayFlushCommitMarkerWithoutFlushStartMarker(true); + } + + /** + * Tests the case where we receive a flush commit before receiving any flush prepare markers. + * The memstore edits should not be dropped after the flush commit replay since not every edit + * will be in flushed files (based on seqId) + */ + @Test + public void testReplayFlushCommitMarkerWithoutFlushStartMarkerNonDroppableMemstore() + throws IOException { + testReplayFlushCommitMarkerWithoutFlushStartMarker(false); + } + + /** + * Tests the case where we receive a flush commit before receiving any flush prepare markers + */ + public void testReplayFlushCommitMarkerWithoutFlushStartMarker(boolean droppableMemstore) + throws IOException { + // load some data to primary and flush. 1 flush, and possibly some more unflushed data + // written after the flush depending on whether droppableMemstore is set + putDataWithFlushes(primaryRegion, 100, 100, droppableMemstore ? 0 : 100); + int numRows = droppableMemstore ? 100 : 200; + + // now replay the edits and the flush marker + reader = createWALReaderForPrimary(); + + LOG.info("-- Replaying edits and flush events in secondary"); + FlushDescriptor commitFlushDesc = null; + + int lastReplayed = 0; + while (true) { + WAL.Entry entry = reader.next(); + if (entry == null) { + break; + } + FlushDescriptor flushDesc + = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + if (flushDesc != null) { + if (flushDesc.getAction() == FlushAction.START_FLUSH) { + // do not replay flush start marker + } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) { + commitFlushDesc = flushDesc; // hold on to the flush commit marker + } + // after replay verify that everything is still visible + verifyData(secondaryRegion, 0, lastReplayed+1, cq, families); + } else { + lastReplayed = replayEdit(secondaryRegion, entry); + } + } + + // at this point, all replayed data (rows 0-100, or 0-200 when droppableMemstore is false) + // should be in the memstores, and no snapshot has been prepared + verifyData(secondaryRegion, 0, numRows, cq, families); + + // no store files in the region + int expectedStoreFileCount = 0; + for (Store s : secondaryRegion.getStores()) { + assertEquals(expectedStoreFileCount, s.getStorefilesCount()); + } + long regionMemstoreSize = secondaryRegion.getMemstoreSize(); + + // Test case 1: replay a flush commit marker without start flush marker + assertNull(secondaryRegion.getPrepareFlushResult()); + assertTrue(commitFlushDesc.getFlushSequenceNumber() > 0); + + // ensure all files are visible in secondary + for (Store store : secondaryRegion.getStores()) { + assertTrue(store.getMaxSequenceId() <= secondaryRegion.getSequenceId().get()); + } + + LOG.info("-- Replaying flush commit in secondary " + commitFlushDesc); + secondaryRegion.replayWALFlushCommitMarker(commitFlushDesc); + + // assert that the flush files are picked + expectedStoreFileCount++; + for (Store s : secondaryRegion.getStores()) { + assertEquals(expectedStoreFileCount, s.getStorefilesCount()); + } + Store store = secondaryRegion.getStore(Bytes.toBytes("cf1")); + long newFlushableSize = store.getFlushableSize(); + if (droppableMemstore) { + assertTrue(newFlushableSize == 0); // assert that the memstore is dropped + } else { + assertTrue(newFlushableSize > 0); // assert that the memstore is not
dropped + } + + // assert that the region memstore is same as before (we could not drop) + long newRegionMemstoreSize = secondaryRegion.getMemstoreSize(); + if (droppableMemstore) { + assertTrue(0 == newRegionMemstoreSize); + } else { + assertTrue(regionMemstoreSize == newRegionMemstoreSize); + } + + LOG.info("-- Verifying edits from secondary"); + verifyData(secondaryRegion, 0, numRows, cq, families); + + LOG.info("-- Verifying edits from primary."); + verifyData(primaryRegion, 0, numRows, cq, families); + } + + private FlushDescriptor clone(FlushDescriptor flush, long flushSeqId) { + return FlushDescriptor.newBuilder(flush) + .setFlushSequenceNumber(flushSeqId) + .build(); + } + + /** + * Tests replaying region open markers from primary region. Checks whether the files are picked up + */ + @Test + public void testReplayRegionOpenEvent() throws IOException { + putDataWithFlushes(primaryRegion, 100, 0, 100); // no flush + int numRows = 100; + + // close the region and open again. + primaryRegion.close(); + primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null); + + // now replay the edits and the flush marker + reader = createWALReaderForPrimary(); + List regionEvents = Lists.newArrayList(); + + LOG.info("-- Replaying edits and region events in secondary"); + while (true) { + WAL.Entry entry = reader.next(); + if (entry == null) { + break; + } + FlushDescriptor flushDesc + = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + RegionEventDescriptor regionEventDesc + = WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0)); + + if (flushDesc != null) { + // don't replay flush events + } else if (regionEventDesc != null) { + regionEvents.add(regionEventDesc); + } else { + // don't replay edits + } + } + + // we should have 1 open, 1 close and 1 open event + assertEquals(3, regionEvents.size()); + + // replay the first region open event. 
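    // (of the three collected events, get(0) is the initial open, get(1) the close, and get(2)
    // the open that follows the reopen above; only the last one carries the store files created
    // by the flush-on-close, which is why the asserts below still expect an empty secondary)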
+ secondaryRegion.replayWALRegionEventMarker(regionEvents.get(0)); + + // replay the close event as well + secondaryRegion.replayWALRegionEventMarker(regionEvents.get(1)); + + // no store files in the region + int expectedStoreFileCount = 0; + for (Store s : secondaryRegion.getStores()) { + assertEquals(expectedStoreFileCount, s.getStorefilesCount()); + } + long regionMemstoreSize = secondaryRegion.getMemstoreSize(); + assertTrue(regionMemstoreSize == 0); + + // now replay the region open event that should contain new file locations + LOG.info("Testing replaying region open event " + regionEvents.get(2)); + secondaryRegion.replayWALRegionEventMarker(regionEvents.get(2)); + + // assert that the flush files are picked + expectedStoreFileCount++; + for (Store s : secondaryRegion.getStores()) { + assertEquals(expectedStoreFileCount, s.getStorefilesCount()); + } + Store store = secondaryRegion.getStore(Bytes.toBytes("cf1")); + long newFlushableSize = store.getFlushableSize(); + assertTrue(newFlushableSize == 0); + + // assert that the region memstore is empty + long newRegionMemstoreSize = secondaryRegion.getMemstoreSize(); + assertTrue(newRegionMemstoreSize == 0); + + assertNull(secondaryRegion.getPrepareFlushResult()); //prepare snapshot should be dropped if any + + LOG.info("-- Verifying edits from secondary"); + verifyData(secondaryRegion, 0, numRows, cq, families); + + LOG.info("-- Verifying edits from primary."); + verifyData(primaryRegion, 0, numRows, cq, families); + } + + /** + * Tests the case where we replay a region open event after a flush start but before receiving + * flush commit + */ + @Test + public void testReplayRegionOpenEventAfterFlushStart() throws IOException { + putDataWithFlushes(primaryRegion, 100, 100, 100); + int numRows = 200; + + // close the region and open again. 
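    // (closing the primary flushes its remaining memstore and writes a region close event marker
    // to the WAL; reopening writes another open event marker, so the reader below sees edits,
    // flush markers and three region events)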
+ primaryRegion.close(); + primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null); + + // now replay the edits and the flush marker + reader = createWALReaderForPrimary(); + List regionEvents = Lists.newArrayList(); + + LOG.info("-- Replaying edits and region events in secondary"); + while (true) { + WAL.Entry entry = reader.next(); + if (entry == null) { + break; + } + FlushDescriptor flushDesc + = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + RegionEventDescriptor regionEventDesc + = WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0)); + + if (flushDesc != null) { + // only replay flush start + if (flushDesc.getAction() == FlushAction.START_FLUSH) { + secondaryRegion.replayWALFlushStartMarker(flushDesc); + } + } else if (regionEventDesc != null) { + regionEvents.add(regionEventDesc); + } else { + replayEdit(secondaryRegion, entry); + } + } + + // at this point, there should be some data (rows 0-100) in the memstore snapshot + // and some more data in memstores (rows 100-200) + verifyData(secondaryRegion, 0, numRows, cq, families); + + // we should have 1 open, 1 close and 1 open event + assertEquals(3, regionEvents.size()); + + // no store files in the region + int expectedStoreFileCount = 0; + for (Store s : secondaryRegion.getStores()) { + assertEquals(expectedStoreFileCount, s.getStorefilesCount()); + } + + // now replay the region open event that should contain new file locations + LOG.info("Testing replaying region open event " + regionEvents.get(2)); + secondaryRegion.replayWALRegionEventMarker(regionEvents.get(2)); + + // assert that the flush files are picked + expectedStoreFileCount = 2; // two flushes happened + for (Store s : secondaryRegion.getStores()) { + assertEquals(expectedStoreFileCount, s.getStorefilesCount()); + } + Store store = secondaryRegion.getStore(Bytes.toBytes("cf1")); + long newSnapshotSize = store.getSnapshotSize(); + assertTrue(newSnapshotSize == 0); + + // assert that the region memstore is empty + long newRegionMemstoreSize = secondaryRegion.getMemstoreSize(); + assertTrue(newRegionMemstoreSize == 0); + + assertNull(secondaryRegion.getPrepareFlushResult()); //prepare snapshot should be dropped if any + + LOG.info("-- Verifying edits from secondary"); + verifyData(secondaryRegion, 0, numRows, cq, families); + + LOG.info("-- Verifying edits from primary."); + verifyData(primaryRegion, 0, numRows, cq, families); + } + + /** + * Tests whether edits coming in for replay are skipped which have smaller seq id than the seqId + * of the last replayed region open event. + */ + @Test + public void testSkippingEditsWithSmallerSeqIdAfterRegionOpenEvent() throws IOException { + putDataWithFlushes(primaryRegion, 100, 100, 0); + int numRows = 100; + + // close the region and open again. 
+ primaryRegion.close(); + primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null); + + // now read the edits and the region event markers back from the WAL + reader = createWALReaderForPrimary(); + List<RegionEventDescriptor> regionEvents = Lists.newArrayList(); + List<WAL.Entry> edits = Lists.newArrayList(); + + LOG.info("-- Replaying edits and region events in secondary"); + while (true) { + WAL.Entry entry = reader.next(); + if (entry == null) { + break; + } + FlushDescriptor flushDesc + = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + RegionEventDescriptor regionEventDesc + = WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0)); + + if (flushDesc != null) { + // don't replay flushes + } else if (regionEventDesc != null) { + regionEvents.add(regionEventDesc); + } else { + edits.add(entry); + } + } + + // replay the region open marker of the first open, but with the seqId of the second open + // this way none of the flush files will be picked up. + secondaryRegion.replayWALRegionEventMarker( + RegionEventDescriptor.newBuilder(regionEvents.get(0)).setLogSequenceNumber( + regionEvents.get(2).getLogSequenceNumber()).build()); + + + // replay the edits from before the region close. If replay does not + // skip these, the following verification will NOT fail. + for (WAL.Entry entry: edits) { + replayEdit(secondaryRegion, entry); + } + + boolean expectedFail = false; + try { + verifyData(secondaryRegion, 0, numRows, cq, families); + } catch (AssertionError e) { + expectedFail = true; // expected + } + if (!expectedFail) { + fail("Should have failed this verification"); + } + } + + @Test + public void testReplayFlushSeqIds() throws IOException { + // load some data to primary and flush + int start = 0; + LOG.info("-- Writing some data to primary from " + start + " to " + (start+100)); + putData(primaryRegion, Durability.SYNC_WAL, start, 100, cq, families); + LOG.info("-- Flushing primary, creating 3 files for 3 stores"); + primaryRegion.flush(true); + + // now replay the flush marker + reader = createWALReaderForPrimary(); + + long flushSeqId = -1; + LOG.info("-- Replaying flush events in secondary"); + while (true) { + WAL.Entry entry = reader.next(); + if (entry == null) { + break; + } + FlushDescriptor flushDesc + = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + if (flushDesc != null) { + if (flushDesc.getAction() == FlushAction.START_FLUSH) { + LOG.info("-- Replaying flush start in secondary"); + secondaryRegion.replayWALFlushStartMarker(flushDesc); + flushSeqId = flushDesc.getFlushSequenceNumber(); + } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) { + LOG.info("-- Replaying flush commit in secondary"); + secondaryRegion.replayWALFlushCommitMarker(flushDesc); + assertEquals(flushSeqId, flushDesc.getFlushSequenceNumber()); + } + } + // else do not replay + } + + // TODO: what to do with this?
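    // (intent of the assertion below: once the START_FLUSH and COMMIT_FLUSH markers are replayed,
    // the secondary's mvcc read point should have advanced to the flush sequence number carried
    // by the marker, so the cells in the newly picked up flush file become readable)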
+ // assert that the newly picked up flush file is visible + long readPoint = secondaryRegion.getMVCC().memstoreReadPoint(); + assertEquals(flushSeqId, readPoint); + + // after replay verify that everything is still visible + verifyData(secondaryRegion, 0, 100, cq, families); + } + + @Test + public void testSeqIdsFromReplay() throws IOException { + // test the case where seqId's coming from replayed WALEdits are made persisted with their + // original seqIds and they are made visible through mvcc read point upon replay + String method = name.getMethodName(); + byte[] tableName = Bytes.toBytes(method); + byte[] family = Bytes.toBytes("family"); + + HRegion region = initHRegion(tableName, method, family); + try { + // replay an entry that is bigger than current read point + long readPoint = region.getMVCC().memstoreReadPoint(); + long origSeqId = readPoint + 100; + + Put put = new Put(row).add(family, row, row); + put.setDurability(Durability.SKIP_WAL); // we replay with skip wal + replay(region, put, origSeqId); + + // read point should have advanced to this seqId + assertGet(region, family, row); + + // region seqId should have advanced at least to this seqId + assertEquals(origSeqId, region.getSequenceId().get()); + + // replay an entry that is smaller than current read point + // caution: adding an entry below current read point might cause partial dirty reads. Normal + // replay does not allow reads while replay is going on. + put = new Put(row2).add(family, row2, row2); + put.setDurability(Durability.SKIP_WAL); + replay(region, put, origSeqId - 50); + + assertGet(region, family, row2); + } finally { + region.close(); + } + } + + /** + * Tests that a region opened in secondary mode would not write region open / close + * events to its WAL. + * @throws IOException + */ + @SuppressWarnings("unchecked") + @Test + public void testSecondaryRegionDoesNotWriteRegionEventsToWAL() throws IOException { + secondaryRegion.close(); + walSecondary = spy(walSecondary); + + // test for region open and close + secondaryRegion = HRegion.openHRegion(secondaryHri, htd, walSecondary, CONF, rss, null); + verify(walSecondary, times(0)).append((HTableDescriptor)any(), (HRegionInfo)any(), + (WALKey)any(), (WALEdit)any(), (AtomicLong)any(), anyBoolean(), (List) any()); + + // test for replay prepare flush + putDataByReplay(secondaryRegion, 0, 10, cq, families); + secondaryRegion.replayWALFlushStartMarker(FlushDescriptor.newBuilder(). + setFlushSequenceNumber(10) + .setTableName(ByteString.copyFrom(primaryRegion.getTableDesc().getTableName().getName())) + .setAction(FlushAction.START_FLUSH) + .setEncodedRegionName( + ByteString.copyFrom(primaryRegion.getRegionInfo().getEncodedNameAsBytes())) + .setRegionName(ByteString.copyFrom(primaryRegion.getRegionInfo().getRegionName())) + .build()); + + verify(walSecondary, times(0)).append((HTableDescriptor)any(), (HRegionInfo)any(), + (WALKey)any(), (WALEdit)any(), (AtomicLong)any(), anyBoolean(), (List) any()); + + secondaryRegion.close(); + verify(walSecondary, times(0)).append((HTableDescriptor)any(), (HRegionInfo)any(), + (WALKey)any(), (WALEdit)any(), (AtomicLong)any(), anyBoolean(), (List) any()); + } + + /** + * Tests the reads enabled flag for the region. 
When unset all reads should be rejected + */ + @Test + public void testRegionReadsEnabledFlag() throws IOException { + + putDataByReplay(secondaryRegion, 0, 100, cq, families); + + verifyData(secondaryRegion, 0, 100, cq, families); + + // now disable reads + secondaryRegion.setReadsEnabled(false); + try { + verifyData(secondaryRegion, 0, 100, cq, families); + fail("Should have failed with IOException"); + } catch(IOException ex) { + // expected + } + + // verify that we can still replay data + putDataByReplay(secondaryRegion, 100, 100, cq, families); + + // now enable reads again + secondaryRegion.setReadsEnabled(true); + verifyData(secondaryRegion, 0, 200, cq, families); + } + + /** + * Tests the case where a request for flush cache is sent to the region, but region cannot flush. + * It should write the flush request marker instead. + */ + @Test + public void testWriteFlushRequestMarker() throws IOException { + // primary region is empty at this point. Request a flush with writeFlushRequestWalMarker=false + FlushResultImpl result = (FlushResultImpl)((HRegion)primaryRegion).flushcache(true, false); + assertNotNull(result); + assertEquals(result.result, FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY); + assertFalse(result.wroteFlushWalMarker); + + // request flush again, but this time with writeFlushRequestWalMarker = true + result = (FlushResultImpl)((HRegion)primaryRegion).flushcache(true, true); + assertNotNull(result); + assertEquals(result.result, FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY); + assertTrue(result.wroteFlushWalMarker); + + List flushes = Lists.newArrayList(); + reader = createWALReaderForPrimary(); + while (true) { + WAL.Entry entry = reader.next(); + if (entry == null) { + break; + } + FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + if (flush != null) { + flushes.add(flush); + } + } + + assertEquals(1, flushes.size()); + assertNotNull(flushes.get(0)); + assertEquals(FlushDescriptor.FlushAction.CANNOT_FLUSH, flushes.get(0).getAction()); + } + + /** + * Test the case where the secondary region replica is not in reads enabled state because it is + * waiting for a flush or region open marker from primary region. Replaying CANNOT_FLUSH + * flush marker entry should restore the reads enabled status in the region and allow the reads + * to continue. + */ + @Test + public void testReplayingFlushRequestRestoresReadsEnabledState() throws IOException { + disableReads(secondaryRegion); + + // Test case 1: Test that replaying CANNOT_FLUSH request marker assuming this came from + // triggered flush restores readsEnabled + primaryRegion.flushcache(true, true); + reader = createWALReaderForPrimary(); + while (true) { + WAL.Entry entry = reader.next(); + if (entry == null) { + break; + } + FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + if (flush != null) { + secondaryRegion.replayWALFlushMarker(flush, entry.getKey().getLogSeqNum()); + } + } + + // now reads should be enabled + secondaryRegion.get(new Get(Bytes.toBytes(0))); + } + + /** + * Test the case where the secondary region replica is not in reads enabled state because it is + * waiting for a flush or region open marker from primary region. Replaying flush start and commit + * entries should restore the reads enabled status in the region and allow the reads + * to continue. 
+ */ + @Test + public void testReplayingFlushRestoresReadsEnabledState() throws IOException { + // Test case 2: Test that replaying FLUSH_START and FLUSH_COMMIT markers assuming these came + // from triggered flush restores readsEnabled + disableReads(secondaryRegion); + + // put some data in primary + putData(primaryRegion, Durability.SYNC_WAL, 0, 100, cq, families); + primaryRegion.flush(true); + + reader = createWALReaderForPrimary(); + while (true) { + WAL.Entry entry = reader.next(); + if (entry == null) { + break; + } + FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + if (flush != null) { + secondaryRegion.replayWALFlushMarker(flush, entry.getKey().getLogSeqNum()); + } else { + replayEdit(secondaryRegion, entry); + } + } + + // now reads should be enabled + verifyData(secondaryRegion, 0, 100, cq, families); + } + + /** + * Test the case where the secondary region replica is not in reads enabled state because it is + * waiting for a flush or region open marker from primary region. Replaying flush start and commit + * entries should restore the reads enabled status in the region and allow the reads + * to continue. + */ + @Test + public void testReplayingFlushWithEmptyMemstoreRestoresReadsEnabledState() throws IOException { + // Test case 2: Test that replaying FLUSH_START and FLUSH_COMMIT markers assuming these came + // from triggered flush restores readsEnabled + disableReads(secondaryRegion); + + // put some data in primary + putData(primaryRegion, Durability.SYNC_WAL, 0, 100, cq, families); + primaryRegion.flush(true); + + reader = createWALReaderForPrimary(); + while (true) { + WAL.Entry entry = reader.next(); + if (entry == null) { + break; + } + FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + if (flush != null) { + secondaryRegion.replayWALFlushMarker(flush, entry.getKey().getLogSeqNum()); + } + } + + // now reads should be enabled + verifyData(secondaryRegion, 0, 100, cq, families); + } + + /** + * Test the case where the secondary region replica is not in reads enabled state because it is + * waiting for a flush or region open marker from primary region. Replaying region open event + * entry from primary should restore the reads enabled status in the region and allow the reads + * to continue. 
+ */ + @Test + public void testReplayingRegionOpenEventRestoresReadsEnabledState() throws IOException { + // Test case 3: Test that replaying region open event markers restores readsEnabled + disableReads(secondaryRegion); + + primaryRegion.close(); + primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null); + + reader = createWALReaderForPrimary(); + while (true) { + WAL.Entry entry = reader.next(); + if (entry == null) { + break; + } + + RegionEventDescriptor regionEventDesc + = WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0)); + + if (regionEventDesc != null) { + secondaryRegion.replayWALRegionEventMarker(regionEventDesc); + } + } + + // now reads should be enabled + secondaryRegion.get(new Get(Bytes.toBytes(0))); + } + + @Test + public void testRefreshStoreFiles() throws IOException { + assertEquals(0, primaryRegion.getStoreFileList(families).size()); + assertEquals(0, secondaryRegion.getStoreFileList(families).size()); + + // Test case 1: refresh with an empty region + secondaryRegion.refreshStoreFiles(); + assertEquals(0, secondaryRegion.getStoreFileList(families).size()); + + // do one flush + putDataWithFlushes(primaryRegion, 100, 100, 0); + int numRows = 100; + + // refresh the store file list, and ensure that the files are picked up. + secondaryRegion.refreshStoreFiles(); + assertPathListsEqual(primaryRegion.getStoreFileList(families), + secondaryRegion.getStoreFileList(families)); + assertEquals(families.length, secondaryRegion.getStoreFileList(families).size()); + + LOG.info("-- Verifying edits from secondary"); + verifyData(secondaryRegion, 0, numRows, cq, families); + + // Test case 2: 3 some more flushes + putDataWithFlushes(primaryRegion, 100, 300, 0); + numRows = 300; + + // refresh the store file list, and ensure that the files are picked up. + secondaryRegion.refreshStoreFiles(); + assertPathListsEqual(primaryRegion.getStoreFileList(families), + secondaryRegion.getStoreFileList(families)); + assertEquals(families.length * 4, secondaryRegion.getStoreFileList(families).size()); + + LOG.info("-- Verifying edits from secondary"); + verifyData(secondaryRegion, 0, numRows, cq, families); + + if (FSUtils.WINDOWS) { + // compaction cannot move files while they are open in secondary on windows. Skip remaining. + return; + } + + // Test case 3: compact primary files + primaryRegion.compactStores(); + secondaryRegion.refreshStoreFiles(); + assertPathListsEqual(primaryRegion.getStoreFileList(families), + secondaryRegion.getStoreFileList(families)); + assertEquals(families.length, secondaryRegion.getStoreFileList(families).size()); + + LOG.info("-- Verifying edits from secondary"); + verifyData(secondaryRegion, 0, numRows, cq, families); + + LOG.info("-- Replaying edits in secondary"); + + // Test case 4: replay some edits, ensure that memstore is dropped. 
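    // (the edits are replayed without their flush markers, so they accumulate in the secondary
    // memstore; refreshStoreFiles() then picks up the files the primary flushed and drops the
    // now-redundant memstore content, which is why its size returns to zero below)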
+ assertTrue(secondaryRegion.getMemstoreSize() == 0); + putDataWithFlushes(primaryRegion, 400, 400, 0); + numRows = 400; + + reader = createWALReaderForPrimary(); + while (true) { + WAL.Entry entry = reader.next(); + if (entry == null) { + break; + } + FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0)); + if (flush != null) { + // do not replay flush + } else { + replayEdit(secondaryRegion, entry); + } + } + + assertTrue(secondaryRegion.getMemstoreSize() > 0); + + secondaryRegion.refreshStoreFiles(); + + assertTrue(secondaryRegion.getMemstoreSize() == 0); + + LOG.info("-- Verifying edits from primary"); + verifyData(primaryRegion, 0, numRows, cq, families); + LOG.info("-- Verifying edits from secondary"); + verifyData(secondaryRegion, 0, numRows, cq, families); + } + + /** + * Paths can be qualified or not. This does the assertion using String->Path conversion. + */ + private void assertPathListsEqual(List list1, List list2) { + List l1 = new ArrayList<>(list1.size()); + for (String path : list1) { + l1.add(Path.getPathWithoutSchemeAndAuthority(new Path(path))); + } + List l2 = new ArrayList<>(list2.size()); + for (String path : list2) { + l2.add(Path.getPathWithoutSchemeAndAuthority(new Path(path))); + } + assertEquals(l1, l2); + } + + private void disableReads(HRegion region) { + region.setReadsEnabled(false); + try { + verifyData(region, 0, 1, cq, families); + fail("Should have failed with IOException"); + } catch(IOException ex) { + // expected + } + } + + private void replay(HRegion region, Put put, long replaySeqId) throws IOException { + put.setDurability(Durability.SKIP_WAL); + MutationReplay mutation = new MutationReplay(MutationType.PUT, put, 0, 0); + region.batchReplay(new MutationReplay[] {mutation}, replaySeqId); + } + + /** + * Tests replaying region open markers from primary region. Checks whether the files are picked up + */ + @Test + public void testReplayBulkLoadEvent() throws IOException { + LOG.info("testReplayBulkLoadEvent starts"); + putDataWithFlushes(primaryRegion, 100, 0, 100); // no flush + + // close the region and open again. 
+ primaryRegion.close(); + primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null); + + // bulk load a file into primary region + Random random = new Random(); + byte[] randomValues = new byte[20]; + random.nextBytes(randomValues); + Path testPath = TEST_UTIL.getDataTestDirOnTestFS(); + + List> familyPaths = new ArrayList>(); + int expectedLoadFileCount = 0; + for (byte[] family : families) { + familyPaths.add(new Pair(family, createHFileForFamilies(testPath, family, + randomValues))); + expectedLoadFileCount++; + } + primaryRegion.bulkLoadHFiles(familyPaths, false, null); + + // now replay the edits and the bulk load marker + reader = createWALReaderForPrimary(); + + LOG.info("-- Replaying edits and region events in secondary"); + BulkLoadDescriptor bulkloadEvent = null; + while (true) { + WAL.Entry entry = reader.next(); + if (entry == null) { + break; + } + bulkloadEvent = WALEdit.getBulkLoadDescriptor(entry.getEdit().getCells().get(0)); + if (bulkloadEvent != null) { + break; + } + } + + // we should have 1 bulk load event + assertTrue(bulkloadEvent != null); + assertEquals(expectedLoadFileCount, bulkloadEvent.getStoresCount()); + + // replay the bulk load event + secondaryRegion.replayWALBulkLoadEventMarker(bulkloadEvent); + + + List storeFileName = new ArrayList(); + for (StoreDescriptor storeDesc : bulkloadEvent.getStoresList()) { + storeFileName.addAll(storeDesc.getStoreFileList()); + } + // assert that the bulk loaded files are picked + for (Store s : secondaryRegion.getStores()) { + for (StoreFile sf : s.getStorefiles()) { + storeFileName.remove(sf.getPath().getName()); + } + } + assertTrue("Found some store file isn't loaded:" + storeFileName, storeFileName.isEmpty()); + + LOG.info("-- Verifying edits from secondary"); + for (byte[] family : families) { + assertGet(secondaryRegion, family, randomValues); + } + } + + private String createHFileForFamilies(Path testPath, byte[] family, + byte[] valueBytes) throws IOException { + HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(TEST_UTIL.getConfiguration()); + // TODO We need a way to do this without creating files + Path testFile = new Path(testPath, UUID.randomUUID().toString()); + FSDataOutputStream out = TEST_UTIL.getTestFileSystem().create(testFile); + try { + hFileFactory.withOutputStream(out); + hFileFactory.withFileContext(new HFileContext()); + HFile.Writer writer = hFileFactory.create(); + try { + writer.append(new KeyValue(CellUtil.createCell(valueBytes, family, valueBytes, 0l, + KeyValue.Type.Put.getCode(), valueBytes))); + } finally { + writer.close(); + } + } finally { + out.close(); + } + return testFile.toString(); + } + + /** Puts a total of numRows + numRowsAfterFlush records indexed with numeric row keys. Does + * a flush every flushInterval number of records. 
Then it puts numRowsAfterFlush number of + * more rows but does not execute flush after + * @throws IOException */ + private void putDataWithFlushes(HRegion region, int flushInterval, + int numRows, int numRowsAfterFlush) throws IOException { + int start = 0; + for (; start < numRows; start += flushInterval) { + LOG.info("-- Writing some data to primary from " + start + " to " + (start+flushInterval)); + putData(region, Durability.SYNC_WAL, start, flushInterval, cq, families); + LOG.info("-- Flushing primary, creating 3 files for 3 stores"); + region.flush(true); + } + LOG.info("-- Writing some more data to primary, not flushing"); + putData(region, Durability.SYNC_WAL, start, numRowsAfterFlush, cq, families); + } + + private void putDataByReplay(HRegion region, + int startRow, int numRows, byte[] qf, byte[]... families) throws IOException { + for (int i = startRow; i < startRow + numRows; i++) { + Put put = new Put(Bytes.toBytes("" + i)); + put.setDurability(Durability.SKIP_WAL); + for (byte[] family : families) { + put.add(family, qf, EnvironmentEdgeManager.currentTime(), null); + } + replay(region, put, i+1); + } + } + + private static HRegion initHRegion(byte[] tableName, + String callingMethod, byte[]... families) throws IOException { + return initHRegion(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, + callingMethod, TEST_UTIL.getConfiguration(), false, Durability.SYNC_WAL, null, families); + } + + private static HRegion initHRegion(byte[] tableName, byte[] startKey, byte[] stopKey, + String callingMethod, Configuration conf, boolean isReadOnly, Durability durability, + WAL wal, byte[]... families) throws IOException { + return TEST_UTIL.createLocalHRegion(tableName, startKey, stopKey, callingMethod, conf, + isReadOnly, durability, wal, families); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java index b96a6a5daf2..2965071331d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java @@ -415,12 +415,12 @@ public class TestHeapMemoryManager { } @Override - public void requestFlush(HRegion region, boolean forceFlushAllStores) { + public void requestFlush(Region region, boolean forceFlushAllStores) { this.listener.flushRequested(flushType, region); } @Override - public void requestDelayedFlush(HRegion region, long delay, boolean forceFlushAllStores) { + public void requestDelayedFlush(Region region, long delay, boolean forceFlushAllStores) { } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java index 49ee7e96100..9286e0d5dda 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestKeepDeletes.java @@ -33,13 +33,13 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeepDeletedCells; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import 
org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; @@ -98,7 +98,7 @@ public class TestKeepDeletes { // keep 3 versions, rows do not expire HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3, HConstants.FOREVER, KeepDeletedCells.TRUE); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); @@ -130,15 +130,15 @@ public class TestKeepDeletes { checkResult(r, c0, c0, T2,T1); // flush - region.flushcache(); + region.flush(true); // yep, T2 still there, T1 gone r = region.get(g); checkResult(r, c0, c0, T2); // major compact - region.compactStores(true); - region.compactStores(true); + region.compact(true); + region.compact(true); // one delete marker left (the others did not // have older puts) @@ -169,9 +169,9 @@ public class TestKeepDeletes { r = region.get(g); assertTrue(r.isEmpty()); - region.flushcache(); - region.compactStores(true); - region.compactStores(true); + region.flush(true); + region.compact(true); + region.compact(true); // verify that the delete marker itself was collected region.put(p); @@ -195,7 +195,7 @@ public class TestKeepDeletes { // KEEP_DELETED_CELLS is NOT enabled HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3, HConstants.FOREVER, KeepDeletedCells.FALSE); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); @@ -215,8 +215,8 @@ public class TestKeepDeletes { scan.next(kvs); assertEquals(2, kvs.size()); - region.flushcache(); - region.compactStores(true); + region.flush(true); + region.compact(true); // after compaction they are gone // (note that this a test with a Store without @@ -240,7 +240,7 @@ public class TestKeepDeletes { // KEEP_DELETED_CELLS is NOT enabled HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3, HConstants.FOREVER, KeepDeletedCells.FALSE); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); @@ -263,14 +263,15 @@ public class TestKeepDeletes { s.setTimeRange(0L, ts+1); InternalScanner scanner = region.getScanner(s); List kvs = new ArrayList(); - while(scanner.next(kvs)); + while (scanner.next(kvs)) + ; assertTrue(kvs.isEmpty()); // flushing and minor compaction keep delete markers - region.flushcache(); - region.compactStores(); + region.flush(true); + region.compact(false); assertEquals(1, countDeleteMarkers(region)); - region.compactStores(true); + region.compact(true); // major compaction deleted it assertEquals(0, countDeleteMarkers(region)); @@ -284,7 +285,7 @@ public class TestKeepDeletes { public void testRawScanWithColumns() throws Exception { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3, HConstants.FOREVER, KeepDeletedCells.TRUE); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); 
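    // (the raw scan set up below returns delete markers and all cell versions unfiltered, which
    // is how these KEEP_DELETED_CELLS tests inspect markers directly)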
Scan s = new Scan(); s.setRaw(true); @@ -308,7 +309,7 @@ public class TestKeepDeletes { public void testRawScan() throws Exception { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3, HConstants.FOREVER, KeepDeletedCells.TRUE); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); @@ -398,7 +399,7 @@ public class TestKeepDeletes { public void testDeleteMarkerExpirationEmptyStore() throws Exception { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 1, HConstants.FOREVER, KeepDeletedCells.TRUE); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); @@ -422,13 +423,13 @@ public class TestKeepDeletes { assertEquals(4, countDeleteMarkers(region)); // neither flush nor minor compaction removes any marker - region.flushcache(); + region.flush(true); assertEquals(4, countDeleteMarkers(region)); - region.compactStores(false); + region.compact(false); assertEquals(4, countDeleteMarkers(region)); // major compaction removes all, since there are no puts they affect - region.compactStores(true); + region.compact(true); assertEquals(0, countDeleteMarkers(region)); HBaseTestingUtility.closeRegionAndWAL(region); @@ -441,7 +442,7 @@ public class TestKeepDeletes { public void testDeleteMarkerExpiration() throws Exception { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 1, HConstants.FOREVER, KeepDeletedCells.TRUE); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); @@ -474,9 +475,9 @@ public class TestKeepDeletes { // 1 family marker, 1 column marker, 2 version markers assertEquals(4, countDeleteMarkers(region)); - region.flushcache(); + region.flush(true); assertEquals(4, countDeleteMarkers(region)); - region.compactStores(false); + region.compact(false); assertEquals(4, countDeleteMarkers(region)); // another put will push out the earlier put... @@ -484,14 +485,14 @@ public class TestKeepDeletes { p.add(c0, c0, T1); region.put(p); - region.flushcache(); + region.flush(true); // no markers are collected, since there is an affected put - region.compactStores(true); + region.compact(true); assertEquals(4, countDeleteMarkers(region)); // the last collections collected the earlier put // so after this collection all markers - region.compactStores(true); + region.compact(true); assertEquals(0, countDeleteMarkers(region)); HBaseTestingUtility.closeRegionAndWAL(region); @@ -504,7 +505,7 @@ public class TestKeepDeletes { public void testWithOldRow() throws Exception { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 1, HConstants.FOREVER, KeepDeletedCells.TRUE); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); @@ -537,9 +538,9 @@ public class TestKeepDeletes { // 1 family marker, 1 column marker, 2 version markers assertEquals(4, countDeleteMarkers(region)); - region.flushcache(); + region.flush(true); assertEquals(4, countDeleteMarkers(region)); - region.compactStores(false); + region.compact(false); assertEquals(4, countDeleteMarkers(region)); // another put will push out the earlier put... 
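The TestKeepDeletes hunks above and below, like the other test diffs that follow, apply one mechanical migration: tests hold the Region interface rather than the HRegion class, flushcache() becomes flush(true), and compactStores()/compactStores(true) become compact(false)/compact(true). A minimal sketch of the new call pattern, assuming the same imports as the surrounding tests; the helper name is illustrative and not part of the patch:

  // Flush, minor-compact and major-compact a region through the Region interface,
  // then count its store files. Comments note the pre-patch equivalents.
  private static int flushAndCompact(Region region) throws IOException {
    region.flush(true);        // was: region.flushcache()
    region.compact(false);     // minor compaction; was: region.compactStores()
    region.compact(true);      // major compaction; was: region.compactStores(true)
    int storeFiles = 0;
    for (Store store : region.getStores()) {   // was: region.getStores().values()
      storeFiles += store.getStorefilesCount();
    }
    return storeFiles;
  }
  // WAL access is not part of the Region interface, so the tests cast back where needed:
  //   WAL wal = ((HRegion) region).getWAL();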
@@ -547,14 +548,14 @@ public class TestKeepDeletes { p.add(c0, c0, T1); region.put(p); - region.flushcache(); + region.flush(true); // no markers are collected, since there is an affected put - region.compactStores(true); + region.compact(true); assertEquals(4, countDeleteMarkers(region)); // all markers remain, since we have the older row // and we haven't pushed the inlined markers past MAX_VERSIONS - region.compactStores(true); + region.compact(true); assertEquals(4, countDeleteMarkers(region)); // another put will push out the earlier put... @@ -564,12 +565,12 @@ public class TestKeepDeletes { // this pushed out the column and version marker // but the family markers remains. THIS IS A PROBLEM! - region.compactStores(true); + region.compact(true); assertEquals(1, countDeleteMarkers(region)); // no amount of compacting is getting this of this one // KEEP_DELETED_CELLS=>TTL is an option to avoid this. - region.compactStores(true); + region.compact(true); assertEquals(1, countDeleteMarkers(region)); HBaseTestingUtility.closeRegionAndWAL(region); @@ -582,7 +583,7 @@ public class TestKeepDeletes { public void testRanges() throws Exception { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3, HConstants.FOREVER, KeepDeletedCells.TRUE); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); @@ -664,7 +665,7 @@ public class TestKeepDeletes { public void testDeleteMarkerVersioning() throws Exception { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 1, HConstants.FOREVER, KeepDeletedCells.TRUE); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); Put p = new Put(T1, ts); @@ -690,9 +691,9 @@ public class TestKeepDeletes { d.deleteColumn(c0, c0, ts+3); region.delete(d); - region.flushcache(); - region.compactStores(true); - region.compactStores(true); + region.flush(true); + region.compact(true); + region.compact(true); assertEquals(3, countDeleteMarkers(region)); // add two more puts, since max version is 1 @@ -722,7 +723,7 @@ public class TestKeepDeletes { assertEquals(1, countDeleteMarkers(region)); // flush cache only sees what is in the memstore - region.flushcache(); + region.flush(true); // Here we have the three markers again, because the flush above // removed the 2nd put before the file is written. 
@@ -731,7 +732,7 @@ public class TestKeepDeletes { // delete, put, delete, delete assertEquals(3, countDeleteMarkers(region)); - region.compactStores(true); + region.compact(true); assertEquals(3, countDeleteMarkers(region)); // add one more put @@ -739,12 +740,12 @@ public class TestKeepDeletes { p.add(c0, c0, T4); region.put(p); - region.flushcache(); + region.flush(true); // one trailing delete marker remains (but only one) // because delete markers do not increase the version count assertEquals(1, countDeleteMarkers(region)); - region.compactStores(true); - region.compactStores(true); + region.compact(true); + region.compact(true); assertEquals(1, countDeleteMarkers(region)); HBaseTestingUtility.closeRegionAndWAL(region); @@ -756,7 +757,7 @@ public class TestKeepDeletes { public void testWithMixedCFs() throws Exception { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 1, HConstants.FOREVER, KeepDeletedCells.TRUE); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime(); @@ -808,7 +809,7 @@ public class TestKeepDeletes { public void testWithMinVersions() throws Exception { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 3, 1000, 1, KeepDeletedCells.TRUE); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime() - 2000; // 2s in the past @@ -844,7 +845,7 @@ public class TestKeepDeletes { // 3 families, one column delete marker assertEquals(4, countDeleteMarkers(region)); - region.flushcache(); + region.flush(true); // no delete marker removes by the flush assertEquals(4, countDeleteMarkers(region)); @@ -853,7 +854,7 @@ public class TestKeepDeletes { p = new Put(T1, ts+1); p.add(c0, c0, T4); region.put(p); - region.flushcache(); + region.flush(true); assertEquals(4, countDeleteMarkers(region)); @@ -866,14 +867,14 @@ public class TestKeepDeletes { p.add(c0, c0, T5); region.put(p); - region.flushcache(); - region.compactStores(true); + region.flush(true); + region.compact(true); // the two family markers without puts are gone assertEquals(2, countDeleteMarkers(region)); // the last compactStores updated the earliestPutTs, // so after the next compaction the last family delete marker is also gone - region.compactStores(true); + region.compact(true); assertEquals(0, countDeleteMarkers(region)); HBaseTestingUtility.closeRegionAndWAL(region); @@ -887,7 +888,7 @@ public class TestKeepDeletes { public void testWithTTL() throws Exception { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 1, 1000, 1, KeepDeletedCells.TTL); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); long ts = EnvironmentEdgeManager.currentTime() - 2000; // 2s in the past @@ -910,21 +911,21 @@ public class TestKeepDeletes { // 3 families, one column delete marker assertEquals(3, countDeleteMarkers(region)); - region.flushcache(); + region.flush(true); // no delete marker removes by the flush assertEquals(3, countDeleteMarkers(region)); // but the Put is gone checkGet(region, T1, c0, c0, ts+1); - region.compactStores(true); + region.compact(true); // all delete marker gone assertEquals(0, countDeleteMarkers(region)); HBaseTestingUtility.closeRegionAndWAL(region); } - private void checkGet(HRegion region, byte[] row, byte[] fam, byte[] col, + private void checkGet(Region 
region, byte[] row, byte[] fam, byte[] col, long time, byte[]... vals) throws IOException { Get g = new Get(row); g.addColumn(fam, col); @@ -935,11 +936,11 @@ public class TestKeepDeletes { } - private int countDeleteMarkers(HRegion region) throws IOException { + private int countDeleteMarkers(Region region) throws IOException { Scan s = new Scan(); s.setRaw(true); // use max versions from the store(s) - s.setMaxVersions(region.getStores().values().iterator().next().getScanInfo().getMaxVersions()); + s.setMaxVersions(region.getStores().iterator().next().getScanInfo().getMaxVersions()); InternalScanner scan = region.getScanner(s); List kvs = new ArrayList(); int res = 0; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java index df43bd0227f..cc168049b18 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java @@ -44,8 +44,6 @@ import org.apache.hadoop.hbase.HBaseTestCase.HRegionIncommon; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Result; @@ -57,8 +55,10 @@ import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.compactions.RatioBasedCompactionPolicy; -import org.apache.hadoop.hbase.wal.WAL; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.wal.WAL; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -77,7 +77,7 @@ public class TestMajorCompaction { private static final HBaseTestingUtility UTIL = HBaseTestingUtility.createLocalHTU(); protected Configuration conf = UTIL.getConfiguration(); - private HRegion r = null; + private Region r = null; private HTableDescriptor htd = null; private static final byte [] COLUMN_FAMILY = fam1; private final byte [] STARTROW = Bytes.toBytes(START_KEY); @@ -110,8 +110,8 @@ public class TestMajorCompaction { @After public void tearDown() throws Exception { - WAL wal = r.getWAL(); - this.r.close(); + WAL wal = ((HRegion)r).getWAL(); + ((HRegion)r).close(); wal.close(); } @@ -137,9 +137,9 @@ public class TestMajorCompaction { } while(true); s.close(); // Flush - r.flushcache(); + r.flush(true); // Major compact. 
- r.compactStores(true); + r.compact(true); s = r.getScanner(new Scan()); int counter = 0; do { @@ -173,24 +173,22 @@ public class TestMajorCompaction { public void majorCompactionWithDataBlockEncoding(boolean inCacheOnly) throws Exception { - Map replaceBlockCache = - new HashMap(); - for (Entry pair : r.getStores().entrySet()) { - HStore store = (HStore) pair.getValue(); + Map replaceBlockCache = + new HashMap(); + for (Store store : r.getStores()) { HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder(); replaceBlockCache.put(store, blockEncoder); final DataBlockEncoding inCache = DataBlockEncoding.PREFIX; final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE : inCache; - store.setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(onDisk)); + ((HStore)store).setDataBlockEncoderInTest(new HFileDataBlockEncoderImpl(onDisk)); } majorCompaction(); // restore settings - for (Entry entry : - replaceBlockCache.entrySet()) { - entry.getKey().setDataBlockEncoderInTest(entry.getValue()); + for (Entry entry : replaceBlockCache.entrySet()) { + ((HStore)entry.getKey()).setDataBlockEncoderInTest(entry.getValue()); } } @@ -210,16 +208,16 @@ public class TestMajorCompaction { assertEquals(compactionThreshold, result.size()); // see if CompactionProgress is in place but null - for (Store store : this.r.stores.values()) { + for (Store store : r.getStores()) { assertNull(store.getCompactionProgress()); } - r.flushcache(); - r.compactStores(true); + r.flush(true); + r.compact(true); // see if CompactionProgress has done its thing on at least one store int storeCount = 0; - for (Store store : this.r.stores.values()) { + for (Store store : r.getStores()) { CompactionProgress progress = store.getCompactionProgress(); if( progress != null ) { ++storeCount; @@ -258,20 +256,20 @@ public class TestMajorCompaction { result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100)); assertTrue("Second row should have been deleted", result.isEmpty()); - r.flushcache(); + r.flush(true); result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100)); assertTrue("Second row should have been deleted", result.isEmpty()); // Add a bit of data and flush. Start adding at 'bbb'. createSmallerStoreFile(this.r); - r.flushcache(); + r.flush(true); // Assert that the second row is still deleted. result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100)); assertTrue("Second row should still be deleted", result.isEmpty()); // Force major compaction. 
- r.compactStores(true); + r.compact(true); assertEquals(r.getStore(COLUMN_FAMILY_TEXT).getStorefiles().size(), 1); result = r.get(new Get(secondRowBytes).addFamily(COLUMN_FAMILY_TEXT).setMaxVersions(100)); @@ -285,7 +283,7 @@ public class TestMajorCompaction { // Multiple versions allowed for an entry, so the delete isn't enough // Lower TTL and expire to ensure that all our entries have been wiped final int ttl = 1000; - for (Store hstore : this.r.stores.values()) { + for (Store hstore : r.getStores()) { HStore store = ((HStore) hstore); ScanInfo old = store.getScanInfo(); ScanInfo si = new ScanInfo(old.getFamily(), @@ -295,7 +293,7 @@ public class TestMajorCompaction { } Thread.sleep(1000); - r.compactStores(true); + r.compact(true); int count = count(); assertEquals("Should not see anything after TTL has expired", 0, count); } @@ -313,11 +311,11 @@ public class TestMajorCompaction { try { createStoreFile(r); createStoreFile(r); - r.compactStores(true); + r.compact(true); // add one more file & verify that a regular compaction won't work createStoreFile(r); - r.compactStores(false); + r.compact(false); assertEquals(2, s.getStorefilesCount()); // ensure that major compaction time is deterministic @@ -337,7 +335,7 @@ public class TestMajorCompaction { Thread.sleep(mcTime); // trigger a compaction request and ensure that it's upgraded to major - r.compactStores(false); + r.compact(false); assertEquals(1, s.getStorefilesCount()); } finally { // reset the timed compaction settings @@ -345,7 +343,7 @@ public class TestMajorCompaction { conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.20F); // run a major to reset the cache createStoreFile(r); - r.compactStores(true); + r.compact(true); assertEquals(1, s.getStorefilesCount()); } } @@ -353,7 +351,7 @@ public class TestMajorCompaction { private void verifyCounts(int countRow1, int countRow2) throws Exception { int count1 = 0; int count2 = 0; - for (StoreFile f: this.r.stores.get(COLUMN_FAMILY_TEXT).getStorefiles()) { + for (StoreFile f: r.getStore(COLUMN_FAMILY_TEXT).getStorefiles()) { HFileScanner scanner = f.getReader().getScanner(false, false); scanner.seekTo(); do { @@ -372,8 +370,7 @@ public class TestMajorCompaction { private int count() throws IOException { int count = 0; - for (StoreFile f: this.r.stores. - get(COLUMN_FAMILY_TEXT).getStorefiles()) { + for (StoreFile f: r.getStore(COLUMN_FAMILY_TEXT).getStorefiles()) { HFileScanner scanner = f.getReader().getScanner(false, false); if (!scanner.seekTo()) { continue; @@ -385,17 +382,17 @@ public class TestMajorCompaction { return count; } - private void createStoreFile(final HRegion region) throws IOException { + private void createStoreFile(final Region region) throws IOException { createStoreFile(region, Bytes.toString(COLUMN_FAMILY)); } - private void createStoreFile(final HRegion region, String family) throws IOException { + private void createStoreFile(final Region region, String family) throws IOException { HRegionIncommon loader = new HRegionIncommon(region); HBaseTestCase.addContent(loader, family); loader.flushcache(); } - private void createSmallerStoreFile(final HRegion region) throws IOException { + private void createSmallerStoreFile(final Region region) throws IOException { HRegionIncommon loader = new HRegionIncommon(region); HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY), ("" + "bbb").getBytes(), null); @@ -465,9 +462,9 @@ public class TestMajorCompaction { } while (true); s.close(); // Flush - r.flushcache(); + r.flush(true); // Major compact. 
- r.compactStores(true); + r.compact(true); scan = new Scan(); scan.setReversed(true); s = r.getScanner(scan); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java index e777c1d07e5..ffaae0c752b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java @@ -76,6 +76,7 @@ public class TestMetricsRegionServer { HELPER.assertGauge("mutationsWithoutWALCount", 409, serverSource); HELPER.assertGauge("mutationsWithoutWALSize", 410, serverSource); HELPER.assertGauge("percentFilesLocal", 99, serverSource); + HELPER.assertGauge("percentFilesLocalSecondaryRegions", 99, serverSource); HELPER.assertGauge("compactionQueueLength", 411, serverSource); HELPER.assertGauge("flushQueueLength", 412, serverSource); HELPER.assertGauge("blockCacheFreeSize", 413, serverSource); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java index 16f29dc3bdc..7f8a20b11fb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinVersions.java @@ -68,7 +68,7 @@ public class TestMinVersions { public void testGetClosestBefore() throws Exception { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 1, 1000, 1, KeepDeletedCells.FALSE); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); try { // 2s in the past @@ -96,8 +96,8 @@ public class TestMinVersions { checkResult(r, c0, T4); // now flush/compact - region.flushcache(); - region.compactStores(true); + region.flush(true); + region.compact(true); r = region.getClosestRowBefore(T1, c0); checkResult(r, c0, T4); @@ -118,7 +118,7 @@ public class TestMinVersions { // keep 3 versions minimum HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 3, 1000, 1, KeepDeletedCells.FALSE); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); // 2s in the past long ts = EnvironmentEdgeManager.currentTime() - 2000; @@ -132,8 +132,8 @@ public class TestMinVersions { region.put(p); // now flush/compact - region.flushcache(); - region.compactStores(true); + region.flush(true); + region.compact(true); p = new Put(T1, ts); p.add(c0, c0, T3); @@ -173,7 +173,7 @@ public class TestMinVersions { public void testDelete() throws Exception { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 3, 1000, 1, KeepDeletedCells.FALSE); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); // 2s in the past long ts = EnvironmentEdgeManager.currentTime() - 2000; @@ -206,8 +206,8 @@ public class TestMinVersions { checkResult(r, c0, T3); // now flush/compact - region.flushcache(); - region.compactStores(true); + region.flush(true); + region.compact(true); // try again g = new Get(T1); @@ -232,7 +232,7 @@ public class TestMinVersions { public void testMemStore() throws Exception { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 2, 1000, 1, KeepDeletedCells.FALSE); - HRegion region = hbu.createLocalHRegion(htd, 
null, null); + Region region = hbu.createLocalHRegion(htd, null, null); // 2s in the past long ts = EnvironmentEdgeManager.currentTime() - 2000; @@ -254,8 +254,8 @@ public class TestMinVersions { region.put(p); // now flush/compact - region.flushcache(); - region.compactStores(true); + region.flush(true); + region.compact(true); // now put the first version (backdated) p = new Put(T1, ts-3); @@ -308,7 +308,7 @@ public class TestMinVersions { // 1 version minimum, 1000 versions maximum, ttl = 1s HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 2, 1000, 1, KeepDeletedCells.FALSE); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); try { // 2s in the past @@ -361,7 +361,7 @@ public class TestMinVersions { checkResult(r, c0, T4,T3); // now flush - region.flushcache(); + region.flush(true); // with HBASE-4241 a flush will eliminate the expired rows g = new Get(T1); @@ -370,7 +370,7 @@ public class TestMinVersions { assertTrue(r.isEmpty()); // major compaction - region.compactStores(true); + region.compact(true); // after compaction the 4th version is still available g = new Get(T1); @@ -400,7 +400,7 @@ public class TestMinVersions { public void testFilters() throws Exception { HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 2, 1000, 1, KeepDeletedCells.FALSE); - HRegion region = hbu.createLocalHRegion(htd, null, null); + Region region = hbu.createLocalHRegion(htd, null, null); final byte [] c1 = COLUMNS[1]; // 2s in the past @@ -446,8 +446,8 @@ public class TestMinVersions { checkResult(r, c0, T2,T1); // now flush/compact - region.flushcache(); - region.compactStores(true); + region.flush(true); + region.compact(true); g = new Get(T1); g.addColumn(c1,c1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java index 7ac6eefd71e..b694fe2b351 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMinorCompaction.java @@ -57,7 +57,7 @@ public class TestMinorCompaction { private static final HBaseTestingUtility UTIL = HBaseTestingUtility.createLocalHTU(); protected Configuration conf = UTIL.getConfiguration(); - private HRegion r = null; + private Region r = null; private HTableDescriptor htd = null; private int compactionThreshold; private byte[] firstRowBytes, secondRowBytes, thirdRowBytes; @@ -90,8 +90,8 @@ public class TestMinorCompaction { @After public void tearDown() throws Exception { - WAL wal = r.getWAL(); - this.r.close(); + WAL wal = ((HRegion)r).getWAL(); + ((HRegion)r).close(); wal.close(); } @@ -172,7 +172,7 @@ public class TestMinorCompaction { thirdRowBytes, i); HBaseTestCase.addContent(loader, Bytes.toString(fam2), Bytes.toString(col2), firstRowBytes, thirdRowBytes, i); - r.flushcache(); + r.flush(true); } Result result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100)); @@ -193,7 +193,7 @@ public class TestMinorCompaction { result = r.get(new Get(firstRowBytes).addColumn(fam1, col1).setMaxVersions(100)); assertEquals(compactionThreshold, result.size()); - r.flushcache(); + r.flush(true); // should not change anything. 
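Several of the changed tests now hold the region as a Region but still need HRegion-only operations during teardown (WAL access and close()), hence the ((HRegion) r) casts above. A minimal sketch of that cleanup pattern, with an illustrative class name:

import java.io.IOException;

import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.wal.WAL;

public class RegionTeardownSketch {
  // Close the region first, then its WAL, casting back to HRegion for the
  // operations the Region interface does not expose.
  public static void closeRegionAndWal(Region r) throws IOException {
    WAL wal = ((HRegion) r).getWAL();
    ((HRegion) r).close();
    wal.close();
  }
}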
// Let us check again @@ -205,7 +205,7 @@ public class TestMinorCompaction { assertEquals(compactionThreshold, result.size()); // do a compaction - Store store2 = this.r.stores.get(fam2); + Store store2 = r.getStore(fam2); int numFiles1 = store2.getStorefiles().size(); assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold); // > 3 ((HStore)store2).compactRecentForTestingAssumingDefaultPolicy(compactionThreshold); // = 3 diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobCompaction.java index d429de5ca14..005bdfe8f69 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobCompaction.java @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; @@ -196,7 +195,7 @@ public class TestMobCompaction { Path hbaseRootDir = FSUtils.getRootDir(conf); Path basedir = new Path(hbaseRootDir, htd.getNameAsString()); - List> hfiles = new ArrayList>(1); + List> hfiles = new ArrayList<>(1); for (int i = 0; i < compactionThreshold; i++) { Path hpath = new Path(basedir, "hfile" + i); hfiles.add(Pair.newPair(COLUMN_FAMILY, hpath.toString())); @@ -205,7 +204,7 @@ public class TestMobCompaction { // The following will bulk load the above generated store files and compact, with 600(fileSize) // > 300(threshold) - boolean result = region.bulkLoadHFiles(hfiles, true); + boolean result = region.bulkLoadHFiles(hfiles, true, null); assertTrue("Bulkload result:", result); assertEquals("Before compaction: store files", compactionThreshold, countStoreFiles()); assertEquals("Before compaction: mob file count", 0, countMobFiles()); @@ -244,13 +243,14 @@ public class TestMobCompaction { assertEquals("Before compaction: number of mob cells", numHfiles, countMobCellsInMetadata()); // now let's delete some cells that contain mobs Delete delete = new Delete(deleteRow); - delete.deleteFamily(COLUMN_FAMILY); + delete.addFamily(COLUMN_FAMILY); region.delete(delete); loader.flushcache(); assertEquals("Before compaction: store files", numHfiles + 1, countStoreFiles()); assertEquals("Before compaction: mob files", numHfiles, countMobFiles()); - region.compactStores(true); + // region.compactStores(); + region.compact(true); assertEquals("After compaction: store files", 1, countStoreFiles()); // still have original mob hfiles and now added a mob del file assertEquals("After compaction: mob files", numHfiles + 1, countMobFiles()); @@ -258,7 +258,7 @@ public class TestMobCompaction { Scan scan = new Scan(); scan.setRaw(true); InternalScanner scanner = region.getScanner(scan); - List results = new ArrayList(); + List results = new ArrayList<>(); scanner.next(results); int deleteCount = 0; while (!results.isEmpty()) { @@ -316,7 +316,7 @@ public class TestMobCompaction { private Put createPut(int rowIdx, byte[] dummyData) throws IOException { Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(rowIdx))); p.setDurability(Durability.SKIP_WAL); - p.add(COLUMN_FAMILY, Bytes.toBytes("colX"), dummyData); + p.addColumn(COLUMN_FAMILY, Bytes.toBytes("colX"), 
dummyData); return p; } @@ -345,7 +345,7 @@ public class TestMobCompaction { InternalScanner scanner = region.getScanner(scan); int scannedCount = 0; - List results = new ArrayList(); + List results = new ArrayList<>(); boolean hasMore = true; while (hasMore) { hasMore = scanner.next(results); @@ -391,15 +391,14 @@ public class TestMobCompaction { scan.setAttribute(MobConstants.MOB_SCAN_RAW, Bytes.toBytes(Boolean.TRUE)); InternalScanner scanner = region.getScanner(scan); - List kvs = new ArrayList(); + List kvs = new ArrayList<>(); boolean hasMore = true; String fileName; - Set files = new HashSet(); + Set files = new HashSet<>(); do { kvs.clear(); hasMore = scanner.next(kvs); - for (Cell c : kvs) { - KeyValue kv = KeyValueUtil.ensureKeyValue(c); + for (Cell kv : kvs) { if (!MobUtils.isMobReferenceCell(kv)) { continue; } @@ -432,7 +431,7 @@ public class TestMobCompaction { CacheConfig cacheConfig = new CacheConfig(copyOfConf); Path mobDirPath = new Path(MobUtils.getMobRegionPath(conf, htd.getTableName()), hcd.getNameAsString()); - List sfs = new ArrayList(); + List sfs = new ArrayList<>(); int numDelfiles = 0; int size = 0; if (fs.exists(mobDirPath)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java index 27a0b062a5f..3b5a47473de 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreScanner.java @@ -55,7 +55,7 @@ public class TestMobStoreScanner { private final static byte [] qf1 = Bytes.toBytes("qualifier1"); private final static byte [] qf2 = Bytes.toBytes("qualifier2"); protected final byte[] qf3 = Bytes.toBytes("qualifier3"); - private static HTable table; + private static Table table; private static HBaseAdmin admin; private static HColumnDescriptor hcd; private static HTableDescriptor desc; @@ -83,9 +83,10 @@ public class TestMobStoreScanner { hcd.setMobThreshold(threshold); hcd.setMaxVersions(4); desc.addFamily(hcd); - admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); + admin = TEST_UTIL.getHBaseAdmin(); admin.createTable(desc); - table = new HTable(TEST_UTIL.getConfiguration(), TN); + table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()) + .getTable(TableName.valueOf(TN)); } /** @@ -143,9 +144,9 @@ public class TestMobStoreScanner { byte[] bigValue = new byte[25*1024*1024]; Put put = new Put(row1); - put.add(family, qf1, bigValue); - put.add(family, qf2, bigValue); - put.add(family, qf3, bigValue); + put.addColumn(family, qf1, bigValue); + put.addColumn(family, qf2, bigValue); + put.addColumn(family, qf3, bigValue); table.put(put); Get g = new Get(row1); @@ -155,6 +156,7 @@ public class TestMobStoreScanner { public void testGetFromFiles(boolean reversed) throws Exception { String TN = "testGetFromFiles" + reversed; + TableName tn = TableName.valueOf(TN); setUp(defaultThreshold, TN); long ts1 = System.currentTimeMillis(); long ts2 = ts1 + 1; @@ -162,13 +164,12 @@ public class TestMobStoreScanner { byte [] value = generateMobValue((int)defaultThreshold+1); Put put1 = new Put(row1); - put1.add(family, qf1, ts3, value); - put1.add(family, qf2, ts2, value); - put1.add(family, qf3, ts1, value); + put1.addColumn(family, qf1, ts3, value); + put1.addColumn(family, qf2, ts2, value); + put1.addColumn(family, qf3, ts1, value); table.put(put1); - table.flushCommits(); - admin.flush(TN); + 
admin.flush(tn); Scan scan = new Scan(); setScan(scan, reversed, false); @@ -197,9 +198,9 @@ public class TestMobStoreScanner { byte [] value = generateMobValue((int)defaultThreshold+1);; Put put1 = new Put(row1); - put1.add(family, qf1, ts3, value); - put1.add(family, qf2, ts2, value); - put1.add(family, qf3, ts1, value); + put1.addColumn(family, qf1, ts3, value); + put1.addColumn(family, qf2, ts2, value); + put1.addColumn(family, qf3, ts1, value); table.put(put1); Scan scan = new Scan(); @@ -222,6 +223,7 @@ public class TestMobStoreScanner { public void testGetReferences(boolean reversed) throws Exception { String TN = "testGetReferences" + reversed; + TableName tn = TableName.valueOf(TN); setUp(defaultThreshold, TN); long ts1 = System.currentTimeMillis(); long ts2 = ts1 + 1; @@ -229,13 +231,12 @@ public class TestMobStoreScanner { byte [] value = generateMobValue((int)defaultThreshold+1);; Put put1 = new Put(row1); - put1.add(family, qf1, ts3, value); - put1.add(family, qf2, ts2, value); - put1.add(family, qf3, ts1, value); + put1.addColumn(family, qf1, ts3, value); + put1.addColumn(family, qf2, ts2, value); + put1.addColumn(family, qf3, ts1, value); table.put(put1); - table.flushCommits(); - admin.flush(TN); + admin.flush(tn); Scan scan = new Scan(); setScan(scan, reversed, true); @@ -256,6 +257,7 @@ public class TestMobStoreScanner { public void testMobThreshold(boolean reversed) throws Exception { String TN = "testMobThreshold" + reversed; + TableName tn = TableName.valueOf(TN); setUp(defaultThreshold, TN); byte [] valueLess = generateMobValue((int)defaultThreshold-1); byte [] valueEqual = generateMobValue((int)defaultThreshold); @@ -265,13 +267,12 @@ public class TestMobStoreScanner { long ts3 = ts1 + 2; Put put1 = new Put(row1); - put1.add(family, qf1, ts3, valueLess); - put1.add(family, qf2, ts2, valueEqual); - put1.add(family, qf3, ts1, valueGreater); + put1.addColumn(family, qf1, ts3, valueLess); + put1.addColumn(family, qf2, ts2, valueEqual); + put1.addColumn(family, qf3, ts1, valueGreater); table.put(put1); - table.flushCommits(); - admin.flush(TN); + admin.flush(tn); Scan scan = new Scan(); setScan(scan, reversed, true); @@ -307,6 +308,7 @@ public class TestMobStoreScanner { public void testGetFromArchive(boolean reversed) throws Exception { String TN = "testGetFromArchive" + reversed; + TableName tn = TableName.valueOf(TN); setUp(defaultThreshold, TN); long ts1 = System.currentTimeMillis(); long ts2 = ts1 + 1; @@ -314,13 +316,12 @@ public class TestMobStoreScanner { byte [] value = generateMobValue((int)defaultThreshold+1);; // Put some data Put put1 = new Put(row1); - put1.add(family, qf1, ts3, value); - put1.add(family, qf2, ts2, value); - put1.add(family, qf3, ts1, value); + put1.addColumn(family, qf1, ts3, value); + put1.addColumn(family, qf2, ts2, value); + put1.addColumn(family, qf3, ts1, value); table.put(put1); - table.flushCommits(); - admin.flush(TN); + admin.flush(tn); // Get the files in the mob path Path mobFamilyPath; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java index 9f0b339bd6e..dfdc4e431e5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiColumnScanner.java @@ -44,13 +44,13 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import 
org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueTestUtil; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -145,7 +145,7 @@ public class TestMultiColumnScanner { @Test public void testMultiColumnScanner() throws IOException { - HRegion region = TEST_UTIL.createTestRegion(TABLE_NAME, + Region region = TEST_UTIL.createTestRegion(TABLE_NAME, new HColumnDescriptor(FAMILY) .setCompressionType(comprAlgo) .setBloomFilterType(bloomType) @@ -220,7 +220,7 @@ public class TestMultiColumnScanner { region.delete(d); } } - region.flushcache(); + region.flush(true); } Collections.sort(kvs, KeyValue.COMPARATOR); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java index e3f51ea53ed..82689e424ef 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import java.io.IOException; @@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -49,10 +51,11 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.regionserver.wal.FSHLog; import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.wal.WAL; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -61,7 +64,7 @@ import com.google.common.hash.Hashing; /** * This test verifies the correctness of the Per Column Family flushing strategy */ -@Category(LargeTests.class) +@Category({ RegionServerTests.class, LargeTests.class }) public class TestPerColumnFamilyFlush { private static final Log LOG = LogFactory.getLog(TestPerColumnFamilyFlush.class); @@ -71,18 +74,18 @@ public class TestPerColumnFamilyFlush { public static final TableName TABLENAME = TableName.valueOf("TestPerColumnFamilyFlush", "t1"); - public static final byte[][] families = { Bytes.toBytes("f1"), 
Bytes.toBytes("f2"), + public static final byte[][] FAMILIES = { Bytes.toBytes("f1"), Bytes.toBytes("f2"), Bytes.toBytes("f3"), Bytes.toBytes("f4"), Bytes.toBytes("f5") }; - public static final byte[] FAMILY1 = families[0]; + public static final byte[] FAMILY1 = FAMILIES[0]; - public static final byte[] FAMILY2 = families[1]; + public static final byte[] FAMILY2 = FAMILIES[1]; - public static final byte[] FAMILY3 = families[2]; + public static final byte[] FAMILY3 = FAMILIES[2]; private HRegion initHRegion(String callingMethod, Configuration conf) throws IOException { HTableDescriptor htd = new HTableDescriptor(TABLENAME); - for (byte[] family : families) { + for (byte[] family : FAMILIES) { htd.addFamily(new HColumnDescriptor(family)); } HRegionInfo info = new HRegionInfo(TABLENAME, null, null, false); @@ -96,7 +99,7 @@ public class TestPerColumnFamilyFlush { byte[] row = Bytes.toBytes("row" + familyNum + "-" + putNum); byte[] val = Bytes.toBytes("val" + familyNum + "-" + putNum); Put p = new Put(row); - p.add(families[familyNum - 1], qf, val); + p.addColumn(FAMILIES[familyNum - 1], qf, val); return p; } @@ -109,7 +112,7 @@ public class TestPerColumnFamilyFlush { // A helper function to verify edits. void verifyEdit(int familyNum, int putNum, HTable table) throws IOException { Result r = table.get(createGet(familyNum, putNum)); - byte[] family = families[familyNum - 1]; + byte[] family = FAMILIES[familyNum - 1]; byte[] qf = Bytes.toBytes("q" + familyNum); byte[] val = Bytes.toBytes("val" + familyNum + "-" + putNum); assertNotNull(("Missing Put#" + putNum + " for CF# " + familyNum), r.getFamilyMap(family)); @@ -119,15 +122,15 @@ public class TestPerColumnFamilyFlush { Arrays.equals(r.getFamilyMap(family).get(qf), val)); } - @Test (timeout=180000) + @Test(timeout = 180000) public void testSelectiveFlushWhenEnabled() throws IOException { // Set up the configuration Configuration conf = HBaseConfiguration.create(); conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 200 * 1024); conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushLargeStoresPolicy.class.getName()); conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND, 100 * 1024); - // Intialize the HRegion - HRegion region = initHRegion("testSelectiveFlushWhenEnabled", conf); + // Intialize the region + Region region = initHRegion("testSelectiveFlushWhenEnabled", conf); // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3 for (int i = 1; i <= 1200; i++) { region.put(createPut(1, i)); @@ -140,7 +143,7 @@ public class TestPerColumnFamilyFlush { } } - long totalMemstoreSize = region.getMemstoreSize().get(); + long totalMemstoreSize = region.getMemstoreSize(); // Find the smallest LSNs for edits wrt to each CF. long smallestSeqCF1 = region.getOldestSeqIdOfStore(FAMILY1); @@ -153,8 +156,8 @@ public class TestPerColumnFamilyFlush { long cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); // Get the overall smallest LSN in the region's memstores. - long smallestSeqInRegionCurrentMemstore = - region.getWAL().getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + long smallestSeqInRegionCurrentMemstore = getWAL(region) + .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); // The overall smallest LSN in the region's memstores should be the same as // the LSN of the smallest edit in CF1 @@ -173,7 +176,7 @@ public class TestPerColumnFamilyFlush { + cf2MemstoreSize + cf3MemstoreSize); // Flush! 
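testSelectiveFlushWhenEnabled turns on the per-column-family flush policy through configuration alone: a region-wide flush trigger, the FlushLargeStoresPolicy class, and a per-store lower bound below which a store is not flushed selectively. A minimal sketch of that setup (import locations for the policy classes are assumed; the wrapper class is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.FlushLargeStoresPolicy;
import org.apache.hadoop.hbase.regionserver.FlushPolicyFactory;

public class SelectiveFlushConfigSketch {
  public static Configuration selectiveFlushConf() {
    Configuration conf = HBaseConfiguration.create();
    // Region-wide memstore size that triggers a flush request.
    conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 200 * 1024);
    // Only flush the stores whose memstore exceeds the lower bound.
    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushLargeStoresPolicy.class.getName());
    conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND, 100 * 1024);
    return conf;
  }
}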
- region.flushcache(false); + region.flush(false); // Will use these to check if anything changed. long oldCF2MemstoreSize = cf2MemstoreSize; @@ -183,9 +186,9 @@ public class TestPerColumnFamilyFlush { cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize(); cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); - totalMemstoreSize = region.getMemstoreSize().get(); - smallestSeqInRegionCurrentMemstore = - region.getWAL().getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + totalMemstoreSize = region.getMemstoreSize(); + smallestSeqInRegionCurrentMemstore = getWAL(region) + .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); // We should have cleared out only CF1, since we chose the flush thresholds // and number of puts accordingly. @@ -215,15 +218,15 @@ public class TestPerColumnFamilyFlush { oldCF3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); // Flush again - region.flushcache(false); + region.flush(false); // Recalculate everything cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize(); cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); - totalMemstoreSize = region.getMemstoreSize().get(); - smallestSeqInRegionCurrentMemstore = - region.getWAL().getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); + totalMemstoreSize = region.getMemstoreSize(); + smallestSeqInRegionCurrentMemstore = getWAL(region) + .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); // CF1 and CF2, both should be absent. assertEquals(DefaultMemStore.DEEP_OVERHEAD, cf1MemstoreSize); @@ -238,7 +241,7 @@ public class TestPerColumnFamilyFlush { // In that case, we should flush all the CFs. // Clearing the existing memstores. - region.flushcache(true); + region.flush(true); // The memstore limit is 200*1024 and the column family flush threshold is // around 50*1024. We try to just hit the memstore limit with each CF's @@ -251,14 +254,15 @@ public class TestPerColumnFamilyFlush { region.put(createPut(5, i)); } - region.flushcache(false); + region.flush(false); + // Since we won't find any CF above the threshold, and hence no specific // store to flush, we should flush all the memstores. - assertEquals(0, region.getMemstoreSize().get()); + assertEquals(0, region.getMemstoreSize()); HBaseTestingUtility.closeRegionAndWAL(region); } - @Test (timeout=180000) + @Test(timeout = 180000) public void testSelectiveFlushWhenNotEnabled() throws IOException { // Set up the configuration Configuration conf = HBaseConfiguration.create(); @@ -278,7 +282,7 @@ public class TestPerColumnFamilyFlush { } } - long totalMemstoreSize = region.getMemstoreSize().get(); + long totalMemstoreSize = region.getMemstoreSize(); // Find the sizes of the memstores of each CF. long cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize(); @@ -296,12 +300,12 @@ public class TestPerColumnFamilyFlush { + cf2MemstoreSize + cf3MemstoreSize); // Flush! 
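To decide which families were actually flushed, the test reads each store's memstore size, the region-wide memstore size, and the earliest memstore sequence id recorded in the WAL. A minimal sketch of that inspection (the HRegion cast mirrors the getWAL helper added below; names are illustrative):

import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Region;

public class MemstoreInspectionSketch {
  public static void logMemstoreState(Region region, byte[] family) {
    long storeSize = region.getStore(family).getMemStoreSize();
    long totalSize = region.getMemstoreSize();
    long earliestSeq = ((HRegion) region).getWAL()
        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
    System.out.println("store=" + storeSize + ", region total=" + totalSize
        + ", earliest unflushed seq=" + earliestSeq);
  }
}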
- region.flushcache(false); + region.flush(false); cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize(); cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); - totalMemstoreSize = region.getMemstoreSize().get(); + totalMemstoreSize = region.getMemstoreSize(); long smallestSeqInRegionCurrentMemstore = region.getWAL().getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); @@ -315,20 +319,19 @@ public class TestPerColumnFamilyFlush { } // Find the (first) region which has the specified name. - private static Pair getRegionWithName(TableName tableName) { + private static Pair getRegionWithName(TableName tableName) { MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster(); List rsts = cluster.getRegionServerThreads(); for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) { HRegionServer hrs = rsts.get(i).getRegionServer(); - for (HRegion region : hrs.getOnlineRegions(tableName)) { + for (Region region : hrs.getOnlineRegions(tableName)) { return Pair.newPair(region, hrs); } } return null; } - @Test (timeout=180000) - public void testLogReplay() throws Exception { + private void doTestLogReplay() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 20000); // Carefully chosen limits so that the memstore just flushes when we're done @@ -338,11 +341,11 @@ public class TestPerColumnFamilyFlush { try { TEST_UTIL.startMiniCluster(numRegionServers); TEST_UTIL.getHBaseAdmin().createNamespace( - NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build()); - HTable table = TEST_UTIL.createTable(TABLENAME, families); + NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build()); + HTable table = TEST_UTIL.createTable(TABLENAME, FAMILIES); HTableDescriptor htd = table.getTableDescriptor(); - for (byte[] family : families) { + for (byte[] family : FAMILIES) { if (!htd.hasFamily(family)) { htd.addFamily(new HColumnDescriptor(family)); } @@ -360,16 +363,16 @@ public class TestPerColumnFamilyFlush { table.flushCommits(); Thread.sleep(1000); - Pair desiredRegionAndServer = getRegionWithName(TABLENAME); - HRegion desiredRegion = desiredRegionAndServer.getFirst(); + Pair desiredRegionAndServer = getRegionWithName(TABLENAME); + Region desiredRegion = desiredRegionAndServer.getFirst(); assertTrue("Could not find a region which hosts the new region.", desiredRegion != null); // Flush the region selectively. - desiredRegion.flushcache(false); + desiredRegion.flush(false); long totalMemstoreSize; long cf1MemstoreSize, cf2MemstoreSize, cf3MemstoreSize; - totalMemstoreSize = desiredRegion.getMemstoreSize().get(); + totalMemstoreSize = desiredRegion.getMemstoreSize(); // Find the sizes of the memstores of each CF. cf1MemstoreSize = desiredRegion.getStore(FAMILY1).getMemStoreSize(); @@ -415,10 +418,25 @@ public class TestPerColumnFamilyFlush { // In distributed log replay, the log splitters ask the master for the // last flushed sequence id for a region. This test would ensure that we // are doing the book-keeping correctly. - @Test (timeout=180000) + @Test(timeout = 180000) public void testLogReplayWithDistributedReplay() throws Exception { TEST_UTIL.getConfiguration().setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true); - testLogReplay(); + doTestLogReplay(); + } + + // Test Log Replay with Distributed log split on. 
+ @Test(timeout = 180000) + public void testLogReplayWithDistributedLogSplit() throws Exception { + TEST_UTIL.getConfiguration().setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false); + doTestLogReplay(); + } + + private WAL getWAL(Region region) { + return ((HRegion)region).getWAL(); + } + + private int getNumRolledLogFiles(Region region) { + return ((FSHLog)getWAL(region)).getNumRolledLogFiles(); } /** @@ -427,65 +445,85 @@ public class TestPerColumnFamilyFlush { * test ensures that we do a full-flush in that scenario. * @throws IOException */ - @Test (timeout=180000) + @Test(timeout = 180000) public void testFlushingWhenLogRolling() throws Exception { TableName tableName = TableName.valueOf("testFlushingWhenLogRolling"); Configuration conf = TEST_UTIL.getConfiguration(); - conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 300000); + conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 128 * 1024 * 1024); conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushLargeStoresPolicy.class.getName()); - conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND, 100000); + long cfFlushSizeLowerBound = 2048; + conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND, + cfFlushSizeLowerBound); - // Also, let us try real hard to get a log roll to happen. - // Keeping the log roll period to 2s. - conf.setLong("hbase.regionserver.logroll.period", 2000); - // Keep the block size small so that we fill up the log files very fast. - conf.setLong("hbase.regionserver.hlog.blocksize", 6144); + // One hour, prevent periodic rolling + conf.setLong("hbase.regionserver.logroll.period", 60L * 60 * 1000); + // prevent rolling by size + conf.setLong("hbase.regionserver.hlog.blocksize", 128L * 1024 * 1024); // Make it 10 as max logs before a flush comes on. - final int walcount = 10; - conf.setInt("hbase.regionserver.maxlogs", walcount); - int maxLogs = conf.getInt("hbase.regionserver.maxlogs", walcount); + final int maxLogs = 10; + conf.setInt("hbase.regionserver.maxlogs", maxLogs); - final int numRegionServers = 4; + final int numRegionServers = 1; + TEST_UTIL.startMiniCluster(numRegionServers); try { - TEST_UTIL.startMiniCluster(numRegionServers); HTable table = null; - table = TEST_UTIL.createTable(tableName, families); + table = TEST_UTIL.createTable(tableName, FAMILIES); // Force flush the namespace table so edits to it are not hanging around as oldest // edits. Otherwise, below, when we make maximum number of WAL files, then it will be // the namespace region that is flushed and not the below 'desiredRegion'. try (Admin admin = TEST_UTIL.getConnection().getAdmin()) { admin.flush(TableName.NAMESPACE_TABLE_NAME); } - HRegion desiredRegion = getRegionWithName(tableName).getFirst(); + Pair desiredRegionAndServer = getRegionWithName(tableName); + final Region desiredRegion = desiredRegionAndServer.getFirst(); assertTrue("Could not find a region which hosts the new region.", desiredRegion != null); LOG.info("Writing to region=" + desiredRegion); - // Add some edits. Most will be for CF1, some for CF2 and CF3. - for (int i = 1; i <= 10000; i++) { - table.put(createPut(1, i)); - if (i <= 200) { - table.put(createPut(2, i)); - table.put(createPut(3, i)); + // Add one row for both CFs. + for (int i = 1; i <= 3; i++) { + table.put(createPut(i, 0)); + } + // Now only add row to CF1, make sure when we force a flush, CF1 is larger than the lower + // bound and CF2 and CF3 are smaller than the lower bound. 
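The reworked testFlushingWhenLogRolling no longer relies on a short roll period and a tiny block size to force rolls; instead it disables periodic and size-based rolling and rolls the WAL explicitly until hbase.regionserver.maxlogs is reached. A minimal sketch of that configuration (illustrative wrapper class):

import org.apache.hadoop.conf.Configuration;

public class WalRollConfigSketch {
  public static void disableAutomaticRolls(Configuration conf, int maxLogs) {
    // One hour: effectively no periodic rolling during the test.
    conf.setLong("hbase.regionserver.logroll.period", 60L * 60 * 1000);
    // Large block size: effectively no size-based rolling either.
    conf.setLong("hbase.regionserver.hlog.blocksize", 128L * 1024 * 1024);
    // A flush is forced once this many WAL files have accumulated.
    conf.setInt("hbase.regionserver.maxlogs", maxLogs);
  }
}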
+ for (int i = 0; i < maxLogs; i++) { + for (int j = 0; j < 100; j++) { + table.put(createPut(1, i * 100 + j)); } table.flushCommits(); - // Keep adding until we exceed the number of log files, so that we are - // able to trigger the cleaning of old log files. - int currentNumLogFiles = ((FSHLog) (desiredRegion.getWAL())).getNumLogFiles(); - if (currentNumLogFiles > maxLogs) { - LOG.info("The number of log files is now: " + currentNumLogFiles - + ". Expect a log roll and memstore flush."); - break; + // Roll the WAL. The log file count is less than maxLogs so no flush is triggered. + int currentNumRolledLogFiles = getNumRolledLogFiles(desiredRegion); + assertNull(getWAL(desiredRegion).rollWriter()); + while (getNumRolledLogFiles(desiredRegion) <= currentNumRolledLogFiles) { + Thread.sleep(100); } } table.close(); + assertEquals(maxLogs, getNumRolledLogFiles(desiredRegion)); + assertTrue(desiredRegion.getStore(FAMILY1).getMemStoreSize() > cfFlushSizeLowerBound); + assertTrue(desiredRegion.getStore(FAMILY2).getMemStoreSize() < cfFlushSizeLowerBound); + assertTrue(desiredRegion.getStore(FAMILY3).getMemStoreSize() < cfFlushSizeLowerBound); + table.put(createPut(1, 12345678)); + table.flushCommits(); + // Make numRolledLogFiles greater than maxLogs + desiredRegionAndServer.getSecond().walRoller.requestRollAll(); // Wait for some time till the flush caused by log rolling happens. - while (((FSHLog) (desiredRegion.getWAL())).getNumLogFiles() > maxLogs) Threads.sleep(100); + TEST_UTIL.waitFor(30000, new Waiter.ExplainingPredicate() { + + @Override + public boolean evaluate() throws Exception { + return desiredRegion.getMemstoreSize() == 0; + } + + @Override + public String explainFailure() throws Exception { + long memstoreSize = desiredRegion.getMemstoreSize(); + if (memstoreSize > 0) { + return "Still have unflushed entries in memstore, memstore size is " + memstoreSize; + } + return "Unknown"; + } + }); LOG.info("Finished waiting on flush after too many WALs..."); - - // We have artificially created the conditions for a log roll. When a - // log roll happens, we should flush all the column families. Testing that - // case here. - // Individual families should have been flushed. assertEquals(DefaultMemStore.DEEP_OVERHEAD, desiredRegion.getStore(FAMILY1).getMemStoreSize()); @@ -493,16 +531,16 @@ public class TestPerColumnFamilyFlush { desiredRegion.getStore(FAMILY2).getMemStoreSize()); assertEquals(DefaultMemStore.DEEP_OVERHEAD, desiredRegion.getStore(FAMILY3).getMemStoreSize()); - - // And of course, the total memstore should also be clean. 
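Instead of the old bare sleep loop, the rewritten test waits for the roll-induced flush with Waiter.ExplainingPredicate, which reports the remaining memstore size if the wait times out. A minimal sketch of that pattern (generic parameters assumed; class and method names illustrative):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.regionserver.Region;

public class WaitForFlushSketch {
  public static void waitForEmptyMemstore(HBaseTestingUtility util, final Region region)
      throws Exception {
    util.waitFor(30000, new Waiter.ExplainingPredicate<Exception>() {
      @Override
      public boolean evaluate() throws Exception {
        return region.getMemstoreSize() == 0;
      }

      @Override
      public String explainFailure() throws Exception {
        return "still have unflushed entries, memstore size is " + region.getMemstoreSize();
      }
    });
  }
}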
- assertEquals(0, desiredRegion.getMemstoreSize().get()); + // let WAL cleanOldLogs + assertNull(getWAL(desiredRegion).rollWriter(true)); + assertTrue(getNumRolledLogFiles(desiredRegion) < maxLogs); } finally { TEST_UTIL.shutdownMiniCluster(); } } private void doPut(Table table, long memstoreFlushSize) throws IOException, InterruptedException { - HRegion region = getRegionWithName(table.getName()).getFirst(); + Region region = getRegionWithName(table.getName()).getFirst(); // cf1 4B per row, cf2 40B per row and cf3 400B per row byte[] qf = Bytes.toBytes("qf"); Random rand = new Random(); @@ -515,12 +553,12 @@ public class TestPerColumnFamilyFlush { rand.nextBytes(value1); rand.nextBytes(value2); rand.nextBytes(value3); - put.add(FAMILY1, qf, value1); - put.add(FAMILY2, qf, value2); - put.add(FAMILY3, qf, value3); + put.addColumn(FAMILY1, qf, value1); + put.addColumn(FAMILY2, qf, value2); + put.addColumn(FAMILY3, qf, value3); table.put(put); // slow down to let regionserver flush region. - while (region.getMemstoreSize().get() > memstoreFlushSize) { + while (region.getMemstoreSize() > memstoreFlushSize) { Thread.sleep(100); } } @@ -528,7 +566,7 @@ public class TestPerColumnFamilyFlush { // Under the same write load, small stores should have less store files when // percolumnfamilyflush enabled. - @Test (timeout=180000) + @Test(timeout = 180000) public void testCompareStoreFileCount() throws Exception { long memstoreFlushSize = 1024L * 1024; Configuration conf = TEST_UTIL.getConfiguration(); @@ -555,7 +593,7 @@ public class TestPerColumnFamilyFlush { try { TEST_UTIL.startMiniCluster(1); TEST_UTIL.getHBaseAdmin().createNamespace( - NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build()); + NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build()); TEST_UTIL.getHBaseAdmin().createTable(htd); TEST_UTIL.waitTableAvailable(TABLENAME); Connection conn = ConnectionFactory.createConnection(conf); @@ -564,7 +602,7 @@ public class TestPerColumnFamilyFlush { table.close(); conn.close(); - HRegion region = getRegionWithName(TABLENAME).getFirst(); + Region region = getRegionWithName(TABLENAME).getFirst(); cf1StoreFileCount = region.getStore(FAMILY1).getStorefilesCount(); cf2StoreFileCount = region.getStore(FAMILY2).getStorefilesCount(); cf3StoreFileCount = region.getStore(FAMILY3).getStorefilesCount(); @@ -572,12 +610,12 @@ public class TestPerColumnFamilyFlush { TEST_UTIL.shutdownMiniCluster(); } - LOG.info("==============Test with selective flush enabled==============="); - conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushLargeStoresPolicy.class.getName()); + LOG.info("==============Test with selective flush enabled==============="); + conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushLargeStoresPolicy.class.getName()); try { TEST_UTIL.startMiniCluster(1); TEST_UTIL.getHBaseAdmin().createNamespace( - NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build()); + NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build()); TEST_UTIL.getHBaseAdmin().createTable(htd); Connection conn = ConnectionFactory.createConnection(conf); Table table = conn.getTable(TABLENAME); @@ -585,7 +623,7 @@ public class TestPerColumnFamilyFlush { table.close(); conn.close(); - HRegion region = getRegionWithName(TABLENAME).getFirst(); + Region region = getRegionWithName(TABLENAME).getFirst(); cf1StoreFileCount1 = region.getStore(FAMILY1).getStorefilesCount(); cf2StoreFileCount1 = region.getStore(FAMILY2).getStorefilesCount(); cf3StoreFileCount1 = 
region.getStore(FAMILY3).getStorefilesCount(); @@ -644,9 +682,9 @@ public class TestPerColumnFamilyFlush { rand.nextBytes(value1); rand.nextBytes(value2); rand.nextBytes(value3); - put.add(FAMILY1, qf, value1); - put.add(FAMILY2, qf, value2); - put.add(FAMILY3, qf, value3); + put.addColumn(FAMILY1, qf, value1); + put.addColumn(FAMILY2, qf, value2); + put.addColumn(FAMILY3, qf, value3); table.put(put); if (i % 10000 == 0) { LOG.info(i + " rows put"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java index 64762881f6d..2df2f5a2d70 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQueryMatcher.java @@ -147,27 +147,6 @@ public class TestQueryMatcher extends HBaseTestCase { _testMatch_ExplicitColumns(scan, expected); } - @Test - public void testMatch_ExplicitColumnsWithLookAhead() - throws IOException { - //Moving up from the Tracker by using Gets and List instead - //of just byte [] - - //Expected result - List expected = new ArrayList(); - expected.add(ScanQueryMatcher.MatchCode.SKIP); - expected.add(ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_COL); - expected.add(ScanQueryMatcher.MatchCode.SKIP); - expected.add(ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_COL); - expected.add(ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_ROW); - expected.add(ScanQueryMatcher.MatchCode.DONE); - - Scan s = new Scan(scan); - s.setAttribute(Scan.HINT_LOOKAHEAD, Bytes.toBytes(2)); - _testMatch_ExplicitColumns(s, expected); - } - - @Test public void testMatch_Wildcard() throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java index 8e7fe0488cf..ebce100d1f7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionFavoredNodes.java @@ -108,11 +108,11 @@ public class TestRegionFavoredNodes { } // For each region, choose some datanodes as the favored nodes then assign - // them as favored nodes through the HRegion. + // them as favored nodes through the region. for (int i = 0; i < REGION_SERVERS; i++) { HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(i); - List regions = server.getOnlineRegions(TABLE_NAME); - for (HRegion region : regions) { + List regions = server.getOnlineRegions(TABLE_NAME); + for (Region region : regions) { ListfavoredNodes = new ArrayList(3); String encodedRegionName = region.getRegionInfo().getEncodedName(); @@ -139,8 +139,8 @@ public class TestRegionFavoredNodes { // they are consistent with the favored nodes for that region. for (int i = 0; i < REGION_SERVERS; i++) { HRegionServer server = TEST_UTIL.getHBaseCluster().getRegionServer(i); - List regions = server.getOnlineRegions(TABLE_NAME); - for (HRegion region : regions) { + List regions = server.getOnlineRegions(TABLE_NAME); + for (Region region : regions) { List files = region.getStoreFileList(new byte[][]{COLUMN_FAMILY}); for (String file : files) { FileStatus status = TEST_UTIL.getDFSCluster().getFileSystem(). 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java index 8bcd89eb4f9..b2115b34ea4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java @@ -44,11 +44,11 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.zookeeper.KeeperException; import org.junit.After; import org.junit.Before; @@ -59,7 +59,7 @@ import org.mockito.Mockito; import com.google.common.collect.ImmutableList; /** - * Test the {@link RegionMergeTransaction} class against two HRegions (as + * Test the {@link RegionMergeTransactionImpl} class against two HRegions (as * opposed to running cluster). */ @Category({RegionServerTests.class, SmallTests.class}) @@ -119,14 +119,14 @@ public class TestRegionMergeTransaction { prepareOnGoodRegions(); } - private RegionMergeTransaction prepareOnGoodRegions() throws IOException { - RegionMergeTransaction mt = new RegionMergeTransaction(region_a, region_b, + private RegionMergeTransactionImpl prepareOnGoodRegions() throws IOException { + RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(region_a, region_b, false); - RegionMergeTransaction spyMT = Mockito.spy(mt); + RegionMergeTransactionImpl spyMT = Mockito.spy(mt); doReturn(false).when(spyMT).hasMergeQualifierInMeta(null, - region_a.getRegionName()); + region_a.getRegionInfo().getRegionName()); doReturn(false).when(spyMT).hasMergeQualifierInMeta(null, - region_b.getRegionName()); + region_b.getRegionInfo().getRegionName()); assertTrue(spyMT.prepare(null)); return spyMT; } @@ -136,7 +136,7 @@ public class TestRegionMergeTransaction { */ @Test public void testPrepareWithSameRegion() throws IOException { - RegionMergeTransaction mt = new RegionMergeTransaction(this.region_a, + RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(this.region_a, this.region_a, true); assertFalse("should not merge the same region even if it is forcible ", mt.prepare(null)); @@ -147,7 +147,7 @@ public class TestRegionMergeTransaction { */ @Test public void testPrepareWithRegionsNotAdjacent() throws IOException { - RegionMergeTransaction mt = new RegionMergeTransaction(this.region_a, + RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(this.region_a, this.region_c, false); assertFalse("should not merge two regions if they are adjacent except it is forcible", mt.prepare(null)); @@ -159,13 +159,13 @@ public class TestRegionMergeTransaction { @Test public void testPrepareWithRegionsNotAdjacentUnderCompulsory() throws IOException { - RegionMergeTransaction mt = new RegionMergeTransaction(region_a, region_c, + RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(region_a, region_c, true); - RegionMergeTransaction spyMT = Mockito.spy(mt); + RegionMergeTransactionImpl spyMT = Mockito.spy(mt); doReturn(false).when(spyMT).hasMergeQualifierInMeta(null, - 
region_a.getRegionName()); + region_a.getRegionInfo().getRegionName()); doReturn(false).when(spyMT).hasMergeQualifierInMeta(null, - region_c.getRegionName()); + region_c.getRegionInfo().getRegionName()); assertTrue("Since focible is true, should merge two regions even if they are not adjacent", spyMT.prepare(null)); } @@ -180,7 +180,7 @@ public class TestRegionMergeTransaction { when(storeMock.getFamily()).thenReturn(new HColumnDescriptor("cf")); when(storeMock.close()).thenReturn(ImmutableList.of()); this.region_a.stores.put(Bytes.toBytes(""), storeMock); - RegionMergeTransaction mt = new RegionMergeTransaction(this.region_a, + RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(this.region_a, this.region_b, false); assertFalse( "a region should not be mergeable if it has instances of store file references", @@ -190,7 +190,7 @@ public class TestRegionMergeTransaction { @Test public void testPrepareWithClosedRegion() throws IOException { this.region_a.close(); - RegionMergeTransaction mt = new RegionMergeTransaction(this.region_a, + RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(this.region_a, this.region_b, false); assertFalse(mt.prepare(null)); } @@ -201,13 +201,13 @@ public class TestRegionMergeTransaction { */ @Test public void testPrepareWithRegionsWithMergeReference() throws IOException { - RegionMergeTransaction mt = new RegionMergeTransaction(region_a, region_b, + RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(region_a, region_b, false); - RegionMergeTransaction spyMT = Mockito.spy(mt); + RegionMergeTransactionImpl spyMT = Mockito.spy(mt); doReturn(true).when(spyMT).hasMergeQualifierInMeta(null, - region_a.getRegionName()); + region_a.getRegionInfo().getRegionName()); doReturn(true).when(spyMT).hasMergeQualifierInMeta(null, - region_b.getRegionName()); + region_b.getRegionInfo().getRegionName()); assertFalse(spyMT.prepare(null)); } @@ -220,14 +220,14 @@ public class TestRegionMergeTransaction { assertEquals(rowCountOfRegionB, countRows(this.region_b)); // Start transaction. - RegionMergeTransaction mt = prepareOnGoodRegions(); + RegionMergeTransactionImpl mt = prepareOnGoodRegions(); // Run the execute. Look at what it returns. TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_PORT, 0); CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager( TEST_UTIL.getConfiguration()); Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp); - HRegion mergedRegion = mt.execute(mockServer, null); + HRegion mergedRegion = (HRegion)mt.execute(mockServer, null); // Do some assertions about execution. assertTrue(this.fs.exists(mt.getMergesDir())); // Assert region_a and region_b is closed. @@ -238,10 +238,10 @@ public class TestRegionMergeTransaction { // to be under the merged region dirs. assertEquals(0, this.fs.listStatus(mt.getMergesDir()).length); // Check merged region have correct key span. - assertTrue(Bytes.equals(this.region_a.getStartKey(), - mergedRegion.getStartKey())); - assertTrue(Bytes.equals(this.region_b.getEndKey(), - mergedRegion.getEndKey())); + assertTrue(Bytes.equals(this.region_a.getRegionInfo().getStartKey(), + mergedRegion.getRegionInfo().getStartKey())); + assertTrue(Bytes.equals(this.region_b.getRegionInfo().getEndKey(), + mergedRegion.getRegionInfo().getEndKey())); // Count rows. 
merged region are already open try { int mergedRegionRowCount = countRows(mergedRegion); @@ -264,7 +264,7 @@ public class TestRegionMergeTransaction { assertEquals(rowCountOfRegionB, countRows(this.region_b)); // Start transaction. - RegionMergeTransaction mt = prepareOnGoodRegions(); + RegionMergeTransactionImpl mt = prepareOnGoodRegions(); when(mt.createMergedRegionFromMerges(region_a, region_b, mt.getMergedRegionInfo())).thenThrow( @@ -300,7 +300,7 @@ public class TestRegionMergeTransaction { // Now retry the merge but do not throw an exception this time. assertTrue(mt.prepare(null)); - HRegion mergedRegion = mt.execute(mockServer, null); + HRegion mergedRegion = (HRegion)mt.execute(mockServer, null); // Count rows. daughters are already open // Count rows. merged region are already open try { @@ -324,7 +324,7 @@ public class TestRegionMergeTransaction { assertEquals(rowCountOfRegionB, countRows(this.region_b)); // Start transaction. - RegionMergeTransaction mt = prepareOnGoodRegions(); + RegionMergeTransactionImpl mt = prepareOnGoodRegions(); Mockito.doThrow(new MockedFailedMergedRegionOpen()) .when(mt) .openMergedRegion((Server) Mockito.anyObject(), @@ -364,31 +364,31 @@ public class TestRegionMergeTransaction { byte[] z = Bytes.toBytes("z"); HRegionInfo r1 = new HRegionInfo(tableName); HRegionInfo r2 = new HRegionInfo(tableName, a, z); - HRegionInfo m = RegionMergeTransaction.getMergedRegionInfo(r1, r2); + HRegionInfo m = RegionMergeTransactionImpl.getMergedRegionInfo(r1, r2); assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) && Bytes.equals(m.getEndKey(), r1.getEndKey())); r1 = new HRegionInfo(tableName, null, a); r2 = new HRegionInfo(tableName, a, z); - m = RegionMergeTransaction.getMergedRegionInfo(r1, r2); + m = RegionMergeTransactionImpl.getMergedRegionInfo(r1, r2); assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) && Bytes.equals(m.getEndKey(), r2.getEndKey())); r1 = new HRegionInfo(tableName, null, a); r2 = new HRegionInfo(tableName, z, null); - m = RegionMergeTransaction.getMergedRegionInfo(r1, r2); + m = RegionMergeTransactionImpl.getMergedRegionInfo(r1, r2); assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) && Bytes.equals(m.getEndKey(), r2.getEndKey())); r1 = new HRegionInfo(tableName, a, z); r2 = new HRegionInfo(tableName, z, null); - m = RegionMergeTransaction.getMergedRegionInfo(r1, r2); + m = RegionMergeTransactionImpl.getMergedRegionInfo(r1, r2); assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) && Bytes.equals(m.getEndKey(), r2.getEndKey())); r1 = new HRegionInfo(tableName, a, b); r2 = new HRegionInfo(tableName, b, z); - m = RegionMergeTransaction.getMergedRegionInfo(r1, r2); + m = RegionMergeTransactionImpl.getMergedRegionInfo(r1, r2); assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey()) && Bytes.equals(m.getEndKey(), r2.getEndKey())); } @@ -467,7 +467,7 @@ public class TestRegionMergeTransaction { } } if (flush) { - r.flushcache(); + r.flush(true); } } return rowCount; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java index f4b6f02bcc8..2a949a101c5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java @@ -81,7 +81,7 @@ import com.google.protobuf.ServiceException; /** * Like {@link 
TestRegionMergeTransaction} in that we're testing - * {@link RegionMergeTransaction} only the below tests are against a running + * {@link RegionMergeTransactionImpl} only the below tests are against a running * cluster where {@link TestRegionMergeTransaction} is tests against bare * {@link HRegion}. */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java new file mode 100644 index 00000000000..b18a0f48c1e --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicaFailover.java @@ -0,0 +1,373 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import static org.junit.Assert.*; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter.Predicate; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Consistency; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.RpcRetryingCallerImpl; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.replication.regionserver.TestRegionReplicaReplicationEndpoint; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; +import org.apache.log4j.Level; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +/** + * Tests failover of secondary region replicas. 
+ */ +@RunWith(Parameterized.class) +@Category(LargeTests.class) +public class TestRegionReplicaFailover { + + private static final Log LOG = LogFactory.getLog(TestRegionReplicaReplicationEndpoint.class); + + static { + ((Log4JLogger)RpcRetryingCallerImpl.LOG).getLogger().setLevel(Level.ALL); + } + + private static final HBaseTestingUtility HTU = new HBaseTestingUtility(); + + private static final int NB_SERVERS = 3; + + protected final byte[][] families = new byte[][] {HBaseTestingUtility.fam1, + HBaseTestingUtility.fam2, HBaseTestingUtility.fam3}; + protected final byte[] fam = HBaseTestingUtility.fam1; + protected final byte[] qual1 = Bytes.toBytes("qual1"); + protected final byte[] value1 = Bytes.toBytes("value1"); + protected final byte[] row = Bytes.toBytes("rowA"); + protected final byte[] row2 = Bytes.toBytes("rowB"); + + @Rule public TestName name = new TestName(); + + private HTableDescriptor htd; + + /* + * We are testing with dist log split and dist log replay separately + */ + @Parameters + public static Collection getParameters() { + Object[][] params = + new Boolean[][] { {true}, {false} }; + return Arrays.asList(params); + } + + @Parameterized.Parameter(0) + public boolean distributedLogReplay; + + @Before + public void before() throws Exception { + Configuration conf = HTU.getConfiguration(); + conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true); + conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true); + conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, true); + conf.setInt("replication.stats.thread.period.seconds", 5); + conf.setBoolean("hbase.tests.use.shortcircuit.reads", false); + conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, distributedLogReplay); + + HTU.startMiniCluster(NB_SERVERS); + htd = HTU.createTableDescriptor( + name.getMethodName().substring(0, name.getMethodName().length()-3)); + htd.setRegionReplication(3); + HTU.getHBaseAdmin().createTable(htd); + } + + @After + public void after() throws Exception { + HTU.deleteTableIfAny(htd.getTableName()); + HTU.shutdownMiniCluster(); + } + + /** + * Tests the case where a newly created table with region replicas and no data, the secondary + * region replicas are available to read immediately. + */ + @Test(timeout = 60000) + public void testSecondaryRegionWithEmptyRegion() throws IOException { + // Create a new table with region replication, don't put any data. Test that the secondary + // region replica is available to read. + try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); + Table table = connection.getTable(htd.getTableName())) { + + Get get = new Get(row); + get.setConsistency(Consistency.TIMELINE); + get.setReplicaId(1); + table.get(get); // this should not block + } + } + + /** + * Tests the case where if there is some data in the primary region, reopening the region replicas + * (enable/disable table, etc) makes the region replicas readable. 
+ * @throws IOException + */ + @Test(timeout = 60000) + public void testSecondaryRegionWithNonEmptyRegion() throws IOException { + // Create a new table with region replication and load some data + // than disable and enable the table again and verify the data from secondary + try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); + Table table = connection.getTable(htd.getTableName())) { + + HTU.loadNumericRows(table, fam, 0, 1000); + + HTU.getHBaseAdmin().disableTable(htd.getTableName()); + HTU.getHBaseAdmin().enableTable(htd.getTableName()); + + HTU.verifyNumericRows(table, fam, 0, 1000, 1); + } + } + + /** + * Tests the case where killing a primary region with unflushed data recovers + */ + @Test (timeout = 120000) + public void testPrimaryRegionKill() throws Exception { + try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); + Table table = connection.getTable(htd.getTableName())) { + + HTU.loadNumericRows(table, fam, 0, 1000); + + // wal replication is async, we have to wait until the replication catches up, or we timeout + verifyNumericRowsWithTimeout(table, fam, 0, 1000, 1, 30000); + verifyNumericRowsWithTimeout(table, fam, 0, 1000, 2, 30000); + + // we should not have flushed files now, but data in memstores of primary and secondary + // kill the primary region replica now, and ensure that when it comes back up, we can still + // read from it the same data from primary and secondaries + boolean aborted = false; + for (RegionServerThread rs : HTU.getMiniHBaseCluster().getRegionServerThreads()) { + for (Region r : rs.getRegionServer().getOnlineRegions(htd.getTableName())) { + if (r.getRegionInfo().getReplicaId() == 0) { + LOG.info("Aborting region server hosting primary region replica"); + rs.getRegionServer().abort("for test"); + aborted = true; + } + } + } + assertTrue(aborted); + + // wal replication is async, we have to wait until the replication catches up, or we timeout + verifyNumericRowsWithTimeout(table, fam, 0, 1000, 0, 30000); + verifyNumericRowsWithTimeout(table, fam, 0, 1000, 1, 30000); + verifyNumericRowsWithTimeout(table, fam, 0, 1000, 2, 30000); + } + + // restart the region server + HTU.getMiniHBaseCluster().startRegionServer(); + } + + /** wal replication is async, we have to wait until the replication catches up, or we timeout + */ + private void verifyNumericRowsWithTimeout(final Table table, final byte[] f, final int startRow, + final int endRow, final int replicaId, final long timeout) throws Exception { + try { + HTU.waitFor(timeout, new Predicate() { + @Override + public boolean evaluate() throws Exception { + try { + HTU.verifyNumericRows(table, f, startRow, endRow, replicaId); + return true; + } catch (AssertionError ae) { + return false; + } + } + }); + } catch (Throwable t) { + // ignore this, but redo the verify do get the actual exception + HTU.verifyNumericRows(table, f, startRow, endRow, replicaId); + } + } + + /** + * Tests the case where killing a secondary region with unflushed data recovers, and the replica + * becomes available to read again shortly. 
+ */ + @Test (timeout = 120000) + public void testSecondaryRegionKill() throws Exception { + try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); + Table table = connection.getTable(htd.getTableName())) { + HTU.loadNumericRows(table, fam, 0, 1000); + + // wait for some time to ensure that async wal replication does it's magic + verifyNumericRowsWithTimeout(table, fam, 0, 1000, 1, 30000); + verifyNumericRowsWithTimeout(table, fam, 0, 1000, 2, 30000); + + // we should not have flushed files now, but data in memstores of primary and secondary + // kill the secondary region replica now, and ensure that when it comes back up, we can still + // read from it the same data + boolean aborted = false; + for (RegionServerThread rs : HTU.getMiniHBaseCluster().getRegionServerThreads()) { + for (Region r : rs.getRegionServer().getOnlineRegions(htd.getTableName())) { + if (r.getRegionInfo().getReplicaId() == 1) { + LOG.info("Aborting region server hosting secondary region replica"); + rs.getRegionServer().abort("for test"); + aborted = true; + } + } + } + assertTrue(aborted); + + Threads.sleep(5000); + + HTU.verifyNumericRows(table, fam, 0, 1000, 1); + HTU.verifyNumericRows(table, fam, 0, 1000, 2); + } + + // restart the region server + HTU.getMiniHBaseCluster().startRegionServer(); + } + + /** + * Tests the case where there are 3 region replicas and the primary is continuously accepting + * new writes while one of the secondaries is killed. Verification is done for both of the + * secondary replicas. + */ + @Test (timeout = 120000) + public void testSecondaryRegionKillWhilePrimaryIsAcceptingWrites() throws Exception { + try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); + Table table = connection.getTable(htd.getTableName()); + Admin admin = connection.getAdmin()) { + // start a thread to do the loading of primary + HTU.loadNumericRows(table, fam, 0, 1000); // start with some base + admin.flush(table.getName()); + HTU.loadNumericRows(table, fam, 1000, 2000); + + final AtomicReference ex = new AtomicReference(null); + final AtomicBoolean done = new AtomicBoolean(false); + final AtomicInteger key = new AtomicInteger(2000); + + Thread loader = new Thread() { + @Override + public void run() { + while (!done.get()) { + try { + HTU.loadNumericRows(table, fam, key.get(), key.get()+1000); + key.addAndGet(1000); + } catch (Throwable e) { + ex.compareAndSet(null, e); + } + } + } + }; + loader.start(); + + Thread aborter = new Thread() { + @Override + public void run() { + try { + boolean aborted = false; + for (RegionServerThread rs : HTU.getMiniHBaseCluster().getRegionServerThreads()) { + for (Region r : rs.getRegionServer().getOnlineRegions(htd.getTableName())) { + if (r.getRegionInfo().getReplicaId() == 1) { + LOG.info("Aborting region server hosting secondary region replica"); + rs.getRegionServer().abort("for test"); + aborted = true; + } + } + } + assertTrue(aborted); + } catch (Throwable e) { + ex.compareAndSet(null, e); + } + }; + }; + + aborter.start(); + aborter.join(); + done.set(true); + loader.join(); + + assertNull(ex.get()); + + assertTrue(key.get() > 1000); // assert that the test is working as designed + LOG.info("Loaded up to key :" + key.get()); + verifyNumericRowsWithTimeout(table, fam, 0, key.get(), 0, 30000); + verifyNumericRowsWithTimeout(table, fam, 0, key.get(), 1, 30000); + verifyNumericRowsWithTimeout(table, fam, 0, key.get(), 2, 30000); + } + + // restart the region server + 
HTU.getMiniHBaseCluster().startRegionServer(); + } + + /** + * Tests the case where we are creating a table with a lot of regions and replicas. Opening region + * replicas should not block handlers on RS indefinitely. + */ + @Test (timeout = 120000) + public void testLotsOfRegionReplicas() throws IOException { + int numRegions = NB_SERVERS * 20; + int regionReplication = 10; + String tableName = htd.getTableName().getNameAsString() + "2"; + htd = HTU.createTableDescriptor(tableName); + htd.setRegionReplication(regionReplication); + + // dont care about splits themselves too much + byte[] startKey = Bytes.toBytes("aaa"); + byte[] endKey = Bytes.toBytes("zzz"); + byte[][] splits = HTU.getRegionSplitStartKeys(startKey, endKey, numRegions); + HTU.getHBaseAdmin().createTable(htd, startKey, endKey, numRegions); + + try (Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); + Table table = connection.getTable(htd.getTableName())) { + + for (int i = 1; i < splits.length; i++) { + for (int j = 0; j < regionReplication; j++) { + Get get = new Get(splits[i]); + get.setConsistency(Consistency.TIMELINE); + get.setReplicaId(j); + table.get(get); // this should not block. Regions should be coming online + } + } + } + + HTU.deleteTableIfAny(TableName.valueOf(tableName)); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java index 538e47e60c1..85a8cd23dc2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java @@ -150,12 +150,13 @@ public class TestRegionReplicas { // assert that we can read back from primary Assert.assertEquals(1000, HTU.countRows(table)); // flush so that region replica can read - getRS().getRegionByEncodedName(hriPrimary.getEncodedName()).flushcache(); + Region region = getRS().getRegionByEncodedName(hriPrimary.getEncodedName()); + region.flush(true); openRegion(HTU, getRS(), hriSecondary); // first try directly against region - HRegion region = getRS().getFromOnlineRegions(hriSecondary.getEncodedName()); + region = getRS().getFromOnlineRegions(hriSecondary.getEncodedName()); assertGet(region, 42, true); assertGetRpc(hriSecondary, 42, true); @@ -173,7 +174,8 @@ public class TestRegionReplicas { // assert that we can read back from primary Assert.assertEquals(1000, HTU.countRows(table)); // flush so that region replica can read - getRS().getRegionByEncodedName(hriPrimary.getEncodedName()).flushcache(); + Region region = getRS().getRegionByEncodedName(hriPrimary.getEncodedName()); + region.flush(true); openRegion(HTU, getRS(), hriSecondary); @@ -190,7 +192,7 @@ public class TestRegionReplicas { } } - private void assertGet(HRegion region, int value, boolean expect) throws IOException { + private void assertGet(Region region, int value, boolean expect) throws IOException { byte[] row = Bytes.toBytes(String.valueOf(value)); Get get = new Get(row); Result result = region.get(get); @@ -242,14 +244,15 @@ public class TestRegionReplicas { Assert.assertEquals(1000, HTU.countRows(table)); // flush so that region replica can read LOG.info("Flushing primary region"); - getRS().getRegionByEncodedName(hriPrimary.getEncodedName()).flushcache(); + Region region = getRS().getRegionByEncodedName(hriPrimary.getEncodedName()); + region.flush(true); // ensure that chore is run LOG.info("Sleeping for 
" + (4 * refreshPeriod)); Threads.sleep(4 * refreshPeriod); LOG.info("Checking results from secondary region replica"); - HRegion secondaryRegion = getRS().getFromOnlineRegions(hriSecondary.getEncodedName()); + Region secondaryRegion = getRS().getFromOnlineRegions(hriSecondary.getEncodedName()); Assert.assertEquals(1, secondaryRegion.getStore(f).getStorefilesCount()); assertGet(secondaryRegion, 42, true); @@ -258,10 +261,12 @@ public class TestRegionReplicas { //load some data to primary HTU.loadNumericRows(table, f, 1000, 1100); - getRS().getRegionByEncodedName(hriPrimary.getEncodedName()).flushcache(); + region = getRS().getRegionByEncodedName(hriPrimary.getEncodedName()); + region.flush(true); HTU.loadNumericRows(table, f, 2000, 2100); - getRS().getRegionByEncodedName(hriPrimary.getEncodedName()).flushcache(); + region = getRS().getRegionByEncodedName(hriPrimary.getEncodedName()); + region.flush(true); // ensure that chore is run Threads.sleep(4 * refreshPeriod); @@ -428,20 +433,21 @@ public class TestRegionReplicas { LOG.info("Loading data to primary region"); for (int i = 0; i < 3; ++i) { HTU.loadNumericRows(table, f, i * 1000, (i + 1) * 1000); - getRS().getRegionByEncodedName(hriPrimary.getEncodedName()).flushcache(); + Region region = getRS().getRegionByEncodedName(hriPrimary.getEncodedName()); + region.flush(true); } - HRegion primaryRegion = getRS().getFromOnlineRegions(hriPrimary.getEncodedName()); + Region primaryRegion = getRS().getFromOnlineRegions(hriPrimary.getEncodedName()); Assert.assertEquals(3, primaryRegion.getStore(f).getStorefilesCount()); // Refresh store files on the secondary - HRegion secondaryRegion = getRS().getFromOnlineRegions(hriSecondary.getEncodedName()); + Region secondaryRegion = getRS().getFromOnlineRegions(hriSecondary.getEncodedName()); secondaryRegion.getStore(f).refreshStoreFiles(); Assert.assertEquals(3, secondaryRegion.getStore(f).getStorefilesCount()); // force compaction LOG.info("Force Major compaction on primary region " + hriPrimary); - primaryRegion.compactStores(true); + primaryRegion.compact(true); Assert.assertEquals(1, primaryRegion.getStore(f).getStorefilesCount()); // scan all the hfiles on the secondary. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java new file mode 100644 index 00000000000..44cc94c14ba --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.NetworkInterface; +import java.net.UnknownHostException; +import java.util.Enumeration; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CoordinatedStateManager; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer; +import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Tests for the hostname specification by region server + */ +@Category({RegionServerTests.class, MediumTests.class}) +public class TestRegionServerHostname { + private static final Log LOG = LogFactory.getLog(TestRegionServerHostname.class); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + @Test (timeout=30000) + public void testInvalidRegionServerHostnameAbortsServer() throws Exception { + final int NUM_MASTERS = 1; + final int NUM_RS = 1; + String invalidHostname = "hostAddr.invalid"; + TEST_UTIL.getConfiguration().set(HRegionServer.HOSTNAME_KEY, invalidHostname); + try { + TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS); + } catch (IOException ioe) { + Throwable t1 = ioe.getCause(); + Throwable t2 = t1.getCause(); + assertTrue(t2.getMessage().contains("Failed resolve of " + invalidHostname)); + return; + } finally { + TEST_UTIL.shutdownMiniCluster(); + } + assertTrue("Failed to validate against invalid hostname", false); + } + + @Test(timeout=120000) + public void testRegionServerHostname() throws Exception { + final int NUM_MASTERS = 1; + final int NUM_RS = 1; + Enumeration netInterfaceList = NetworkInterface.getNetworkInterfaces(); + + while (netInterfaceList.hasMoreElements()) { + NetworkInterface ni = netInterfaceList.nextElement(); + Enumeration addrList = ni.getInetAddresses(); + // iterate through host addresses and use each as hostname + while (addrList.hasMoreElements()) { + InetAddress addr = addrList.nextElement(); + if (addr.isLoopbackAddress() || addr.isLinkLocalAddress() || addr.isMulticastAddress()) { + continue; + } + String hostName = addr.getHostName(); + LOG.info("Found " + hostName + " on " + ni); + + TEST_UTIL.getConfiguration().set(HRegionServer.HOSTNAME_KEY, hostName); + TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS); + try { + ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher(); + List servers = ZKUtil.listChildrenNoWatch(zkw, zkw.rsZNode); + // there would be NUM_RS+1 children - one for the master + assertTrue(servers.size() == NUM_RS+1); + for (String server : servers) { + assertTrue(server.startsWith(hostName+",")); + } + zkw.close(); + } finally { + TEST_UTIL.shutdownMiniCluster(); + } + } + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java index 57143515a97..fa634d15b4b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java @@ -53,6 +53,8 @@ public class TestRegionServerMetrics { private static HBaseTestingUtility TEST_UTIL; private static MetricsRegionServer metricsRegionServer; private static MetricsRegionServerSource serverSource; + private static final int NUM_SCAN_NEXT = 30; + private static int numScanNext = 0; @BeforeClass public static void startCluster() throws Exception { @@ -97,6 +99,7 @@ public class TestRegionServerMetrics { @Test public void testLocalFiles() throws Exception { metricsHelper.assertGauge("percentFilesLocal", 0, serverSource); + metricsHelper.assertGauge("percentFilesLocalSecondaryRegions", 0, serverSource); } @Test @@ -328,7 +331,6 @@ public class TestRegionServerMetrics { byte[] qualifier = Bytes.toBytes("qual"); byte[] val = Bytes.toBytes("One"); - List puts = new ArrayList<>(); for (int insertCount =0; insertCount < 100; insertCount++) { Put p = new Put(Bytes.toBytes("" + insertCount + "row")); @@ -343,12 +345,13 @@ public class TestRegionServerMetrics { s.setCaching(1); ResultScanner resultScanners = t.getScanner(s); - for (int nextCount = 0; nextCount < 30; nextCount++) { + for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) { Result result = resultScanners.next(); assertNotNull(result); assertEquals(1, result.size()); } } + numScanNext += NUM_SCAN_NEXT; try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { for ( HRegionLocation location: locator.getAllRegionLocations()) { HRegionInfo i = location.getRegionInfo(); @@ -360,8 +363,63 @@ public class TestRegionServerMetrics { "_table_"+tableNameString + "_region_" + i.getEncodedName()+ "_metric"; - metricsHelper.assertCounter(prefix + "_scanNextNumOps", 30, agg); + metricsHelper.assertCounter(prefix + "_scanNextNumOps", NUM_SCAN_NEXT, agg); } + metricsHelper.assertCounter("ScanNext_num_ops", numScanNext, serverSource); + } + try (Admin admin = TEST_UTIL.getHBaseAdmin()) { + admin.disableTable(tableName); + admin.deleteTable(tableName); + } + } + + @Test + public void testScanNextForSmallScan() throws IOException { + String tableNameString = "testScanNextSmall"; + TableName tableName = TableName.valueOf(tableNameString); + byte[] cf = Bytes.toBytes("d"); + byte[] qualifier = Bytes.toBytes("qual"); + byte[] val = Bytes.toBytes("One"); + + List puts = new ArrayList<>(); + for (int insertCount =0; insertCount < 100; insertCount++) { + Put p = new Put(Bytes.toBytes("" + insertCount + "row")); + p.add(cf, qualifier, val); + puts.add(p); + } + try (HTable t = TEST_UTIL.createTable(tableName, cf)) { + t.put(puts); + + Scan s = new Scan(); + s.setSmall(true); + s.setCaching(1); + ResultScanner resultScanners = t.getScanner(s); + + for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) { + Result result = resultScanners.next(); + assertNotNull(result); + assertEquals(1, result.size()); + } + } + numScanNext += NUM_SCAN_NEXT; + try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) { + for ( HRegionLocation location: locator.getAllRegionLocations()) { + HRegionInfo i = location.getRegionInfo(); + MetricsRegionAggregateSource agg = rs.getRegion(i.getRegionName()) + .getMetrics() + .getSource() + .getAggregateSource(); + String prefix = 
"namespace_"+NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR+ + "_table_"+tableNameString + + "_region_" + i.getEncodedName()+ + "_metric"; + metricsHelper.assertCounter(prefix + "_scanNextNumOps", NUM_SCAN_NEXT, agg); + } + metricsHelper.assertCounter("ScanNext_num_ops", numScanNext, serverSource); + } + try (Admin admin = TEST_UTIL.getHBaseAdmin()) { + admin.disableTable(tableName); + admin.deleteTable(tableName); } } @@ -380,7 +438,7 @@ public class TestRegionServerMetrics { htd.addFamily(hcd); HBaseAdmin admin = new HBaseAdmin(conf); HTable t = TEST_UTIL.createTable(htd, new byte[0][0], conf); - HRegion region = rs.getOnlineRegions(tableName).get(0); + Region region = rs.getOnlineRegions(tableName).get(0); t.setAutoFlush(true, true); for (int insertCount = 0; insertCount < numHfiles; insertCount++) { Put p = new Put(Bytes.toBytes(insertCount)); @@ -393,18 +451,20 @@ public class TestRegionServerMetrics { Scan scan = new Scan(Bytes.toBytes(0), Bytes.toBytes(2)); ResultScanner scanner = t.getScanner(scan); scanner.next(100); + numScanNext++; // this is an ugly construct scanner.close(); metricsRegionServer.getRegionServerWrapper().forceRecompute(); metricsHelper.assertCounter("mobScanCellsCount", 2, serverSource); region.getTableDesc().getFamily(cf).setMobThreshold(100); - region.initialize(); - region.compactStores(true); + ((HRegion)region).initialize(); + region.compact(true); metricsRegionServer.getRegionServerWrapper().forceRecompute(); metricsHelper.assertCounter("mobCompactedFromMobCellsCount", numHfiles, serverSource); metricsHelper.assertCounter("mobCompactedIntoMobCellsCount", 0, serverSource); scanner = t.getScanner(scan); scanner.next(100); + numScanNext++; // this is an ugly construct metricsRegionServer.getRegionServerWrapper().forceRecompute(); // metrics are reset by the region initialization metricsHelper.assertCounter("mobScanCellsCount", 0, serverSource); @@ -416,8 +476,8 @@ public class TestRegionServerMetrics { admin.flush(tableName); } region.getTableDesc().getFamily(cf).setMobThreshold(0); - region.initialize(); - region.compactStores(true); + ((HRegion)region).initialize(); + region.compact(true); metricsRegionServer.getRegionServerWrapper().forceRecompute(); // metrics are reset by the region initialization metricsHelper.assertCounter("mobCompactedFromMobCellsCount", 0, serverSource); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java index 65aed5bae00..786a4e18dad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java @@ -125,9 +125,9 @@ public class TestRegionServerNoMaster { /** Flush the given region in the mini cluster. 
Since no master, we cannot use HBaseAdmin.flush() */ public static void flushRegion(HBaseTestingUtility HTU, HRegionInfo regionInfo) throws IOException { for (RegionServerThread rst : HTU.getMiniHBaseCluster().getRegionServerThreads()) { - HRegion region = rst.getRegionServer().getRegionByEncodedName(regionInfo.getEncodedName()); + Region region = rst.getRegionServer().getRegionByEncodedName(regionInfo.getEncodedName()); if (region != null) { - region.flushcache(); + region.flush(true); return; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java index c58e9c6c08e..9c6ee1a7f94 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerOnlineConfigChange.java @@ -55,7 +55,7 @@ public class TestRegionServerOnlineConfigChange { private static HTable t1 = null; private static HRegionServer rs1 = null; private static byte[] r1name = null; - private static HRegion r1 = null; + private static Region r1 = null; private final static String table1Str = "table1"; private final static String columnFamily1Str = "columnFamily1"; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java new file mode 100644 index 00000000000..ebb82097aca --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java @@ -0,0 +1,183 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.regionserver; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.CoordinatedStateManager; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.LocalHBaseCluster; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.MiniHBaseCluster.MiniHBaseClusterRegionServer; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; +import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; +import org.apache.zookeeper.KeeperException; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(MediumTests.class) +public class TestRegionServerReportForDuty { + + private static final Log LOG = LogFactory.getLog(TestRegionServerReportForDuty.class); + + private static final long SLEEP_INTERVAL = 500; + + private HBaseTestingUtility testUtil; + private LocalHBaseCluster cluster; + private RegionServerThread rs; + private RegionServerThread rs2; + private MasterThread master; + private MasterThread backupMaster; + + @Before + public void setUp() throws Exception { + testUtil = new HBaseTestingUtility(); + testUtil.startMiniDFSCluster(1); + testUtil.startMiniZKCluster(1); + testUtil.createRootDir(); + cluster = new LocalHBaseCluster(testUtil.getConfiguration(), 0, 0); + } + + @After + public void tearDown() throws Exception { + cluster.shutdown(); + cluster.join(); + testUtil.shutdownMiniZKCluster(); + testUtil.shutdownMiniDFSCluster(); + } + + /** + * Tests region sever reportForDuty with backup master becomes primary master after + * the first master goes away. + */ + @Test (timeout=180000) + public void testReportForDutyWithMasterChange() throws Exception { + + // Start a master and wait for it to become the active/primary master. + // Use a random unique port + cluster.getConfiguration().setInt(HConstants.MASTER_PORT, HBaseTestingUtility.randomFreePort()); + // master has a rs. defaultMinToStart = 2 + cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 2); + cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, 2); + master = cluster.addMaster(); + rs = cluster.addRegionServer(); + LOG.debug("Starting master: " + master.getMaster().getServerName()); + master.start(); + rs.start(); + + // Add a 2nd region server + cluster.getConfiguration().set(HConstants.REGION_SERVER_IMPL, MyRegionServer.class.getName()); + rs2 = cluster.addRegionServer(); + // Start the region server. This region server will refresh RPC connection + // from the current active master to the next active master before completing + // reportForDuty + LOG.debug("Starting 2nd region server: " + rs2.getRegionServer().getServerName()); + rs2.start(); + + waitForClusterOnline(master); + + // Stop the current master. + master.getMaster().stop("Stopping master"); + + // Start a new master and use another random unique port + // Also let it wait for exactly 2 region severs to report in. 
+ cluster.getConfiguration().setInt(HConstants.MASTER_PORT, HBaseTestingUtility.randomFreePort()); + cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 3); + cluster.getConfiguration().setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, 3); + backupMaster = cluster.addMaster(); + LOG.debug("Starting new master: " + backupMaster.getMaster().getServerName()); + backupMaster.start(); + + waitForClusterOnline(backupMaster); + + // Do some checking/asserts here. + assertTrue(backupMaster.getMaster().isActiveMaster()); + assertTrue(backupMaster.getMaster().isInitialized()); + assertEquals(backupMaster.getMaster().getServerManager().getOnlineServersList().size(), 3); + + } + + private void waitForClusterOnline(MasterThread master) throws InterruptedException { + while (true) { + if (master.getMaster().isInitialized() + && ((MyRegionServer) rs2.getRegionServer()).getRpcStubCreatedFlag() == true) { + break; + } + Thread.sleep(SLEEP_INTERVAL); + LOG.debug("Waiting for master to come online ..."); + } + rs.waitForServerOnline(); + } + + // Create a Region Server that provide a hook so that we can wait for the master switch over + // before continuing reportForDuty to the mater. + // The idea is that we get a RPC connection to the first active master, then we wait. + // The first master goes down, the second master becomes the active master. The region + // server continues reportForDuty. It should succeed with the new master. + public static class MyRegionServer extends MiniHBaseClusterRegionServer { + + private ServerName sn; + // This flag is to make sure this rs has obtained the rpcStub to the first master. + // The first master will go down after this. + private boolean rpcStubCreatedFlag = false; + private boolean masterChanged = false; + + public MyRegionServer(Configuration conf, CoordinatedStateManager cp) + throws IOException, KeeperException, + InterruptedException { + super(conf, cp); + } + + @Override + protected synchronized ServerName createRegionServerStatusStub() { + sn = super.createRegionServerStatusStub(); + rpcStubCreatedFlag = true; + + // Wait for master switch over. Only do this for the second region server. + while (!masterChanged) { + ServerName newSn = super.getMasterAddressTracker().getMasterAddress(true); + if (newSn != null && !newSn.equals(sn)) { + masterChanged = true; + break; + } + try { + Thread.sleep(SLEEP_INTERVAL); + } catch (InterruptedException e) { + } + LOG.debug("Waiting for master switch over ... 
"); + } + return sn; + } + + public boolean getRpcStubCreatedFlag() { + return rpcStubCreatedFlag; + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java index 924a196ef20..dd7c61acfd6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionSplitPolicy.java @@ -25,7 +25,6 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; @@ -47,7 +46,7 @@ public class TestRegionSplitPolicy { private Configuration conf; private HTableDescriptor htd; private HRegion mockRegion; - private TreeMap stores; + private List stores; private static final TableName TABLENAME = TableName.valueOf("t"); @Before @@ -58,8 +57,7 @@ public class TestRegionSplitPolicy { mockRegion = Mockito.mock(HRegion.class); Mockito.doReturn(htd).when(mockRegion).getTableDesc(); Mockito.doReturn(hri).when(mockRegion).getRegionInfo(); - - stores = new TreeMap(Bytes.BYTES_COMPARATOR); + stores = new ArrayList(); Mockito.doReturn(stores).when(mockRegion).getStores(); } @@ -71,7 +69,7 @@ public class TestRegionSplitPolicy { // Now make it so the mock region has a RegionServerService that will // return 'online regions'. RegionServerServices rss = Mockito.mock(RegionServerServices.class); - final List regions = new ArrayList(); + final List regions = new ArrayList(); Mockito.when(rss.getOnlineRegions(TABLENAME)).thenReturn(regions); Mockito.when(mockRegion.getRegionServerServices()).thenReturn(rss); // Set max size for this 'table'. @@ -95,7 +93,7 @@ public class TestRegionSplitPolicy { HStore mockStore = Mockito.mock(HStore.class); Mockito.doReturn(2000L).when(mockStore).getSize(); Mockito.doReturn(true).when(mockStore).canSplit(); - stores.put(new byte[]{1}, mockStore); + stores.add(mockStore); // It should split assertTrue(policy.shouldSplit()); @@ -112,14 +110,21 @@ public class TestRegionSplitPolicy { // now be no longer be splittable since split size has gone up. regions.add(mockRegion); assertFalse(policy.shouldSplit()); - // Quadruple (2 squared) the store size and make sure its just over; verify it'll split - Mockito.doReturn((flushSize * 2 * 2 * 2) + 1).when(mockStore).getSize(); + // make sure its just over; verify it'll split + Mockito.doReturn((long)(maxSplitSize * 1.25 + 1)).when(mockStore).getSize(); assertTrue(policy.shouldSplit()); // Finally assert that even if loads of regions, we'll split at max size - assertEquals(maxSplitSize, policy.getSizeToCheck(1000)); + assertWithinJitter(maxSplitSize, policy.getSizeToCheck(1000)); // Assert same is true if count of regions is zero. 
- assertEquals(maxSplitSize, policy.getSizeToCheck(0)); + assertWithinJitter(maxSplitSize, policy.getSizeToCheck(0)); + } + + private void assertWithinJitter(long maxSplitSize, long sizeToCheck) { + assertTrue("Size greater than lower bound of jitter", + (long)(maxSplitSize * 0.75) <= sizeToCheck); + assertTrue("Size less than upper bound of jitter", + (long)(maxSplitSize * 1.25) >= sizeToCheck); } @Test @@ -131,13 +136,13 @@ public class TestRegionSplitPolicy { ConstantSizeRegionSplitPolicy policy = (ConstantSizeRegionSplitPolicy)RegionSplitPolicy.create( mockRegion, conf); - assertEquals(1234L, policy.getDesiredMaxFileSize()); + assertWithinJitter(1234L, policy.getDesiredMaxFileSize()); // If specified in HTD, should use that htd.setMaxFileSize(9999L); policy = (ConstantSizeRegionSplitPolicy)RegionSplitPolicy.create( mockRegion, conf); - assertEquals(9999L, policy.getDesiredMaxFileSize()); + assertWithinJitter(9999L, policy.getDesiredMaxFileSize()); } /** @@ -158,7 +163,7 @@ public class TestRegionSplitPolicy { Mockito.doReturn(2000L).when(mockStore).getSize(); Mockito.doReturn(true).when(mockStore).canSplit(); Mockito.doReturn(Bytes.toBytes("abcd")).when(mockStore).getSplitPoint(); - stores.put(new byte[] { 1 }, mockStore); + stores.add(mockStore); KeyPrefixRegionSplitPolicy policy = (KeyPrefixRegionSplitPolicy) RegionSplitPolicy .create(myMockRegion, conf); @@ -195,7 +200,7 @@ public class TestRegionSplitPolicy { HStore mockStore = Mockito.mock(HStore.class); Mockito.doReturn(2000L).when(mockStore).getSize(); Mockito.doReturn(true).when(mockStore).canSplit(); - stores.put(new byte[]{1}, mockStore); + stores.add(mockStore); assertTrue(policy.shouldSplit()); @@ -235,7 +240,7 @@ public class TestRegionSplitPolicy { Mockito.doReturn(true).when(mockStore).canSplit(); Mockito.doReturn(Bytes.toBytes("store 1 split")) .when(mockStore).getSplitPoint(); - stores.put(new byte[]{1}, mockStore); + stores.add(mockStore); assertEquals("store 1 split", Bytes.toString(policy.getSplitPoint())); @@ -246,7 +251,7 @@ public class TestRegionSplitPolicy { Mockito.doReturn(true).when(mockStore2).canSplit(); Mockito.doReturn(Bytes.toBytes("store 2 split")) .when(mockStore2).getSplitPoint(); - stores.put(new byte[]{2}, mockStore2); + stores.add(mockStore2); assertEquals("store 2 split", Bytes.toString(policy.getSplitPoint())); @@ -267,7 +272,7 @@ public class TestRegionSplitPolicy { Mockito.doReturn(2000L).when(mockStore).getSize(); Mockito.doReturn(true).when(mockStore).canSplit(); Mockito.doReturn(Bytes.toBytes("ab,cd")).when(mockStore).getSplitPoint(); - stores.put(new byte[] { 1 }, mockStore); + stores.add(mockStore); DelimitedKeyPrefixRegionSplitPolicy policy = (DelimitedKeyPrefixRegionSplitPolicy) RegionSplitPolicy .create(myMockRegion, conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java index dd7ef29d2dc..3e022434206 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java @@ -69,7 +69,7 @@ public class TestResettingCounters { throw new IOException("Failed delete of " + path); } } - HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, path, conf, htd); + Region region = HBaseTestingUtility.createRegionAndWAL(hri, path, conf, htd); try { Increment odd = new Increment(rows[0]); 
odd.setDurability(Durability.SKIP_WAL); @@ -84,14 +84,14 @@ public class TestResettingCounters { } // increment odd qualifiers 5 times and flush - for (int i=0;i<5;i++) region.increment(odd); - region.flushcache(); + for (int i=0;i<5;i++) region.increment(odd, HConstants.NO_NONCE, HConstants.NO_NONCE); + region.flush(true); // increment even qualifiers 5 times - for (int i=0;i<5;i++) region.increment(even); + for (int i=0;i<5;i++) region.increment(even, HConstants.NO_NONCE, HConstants.NO_NONCE); // increment all qualifiers, should have value=6 for all - Result result = region.increment(all); + Result result = region.increment(all, HConstants.NO_NONCE, HConstants.NO_NONCE); assertEquals(numQualifiers, result.size()); Cell [] kvs = result.rawCells(); for (int i=0;i allColIds = new TreeSet(); - private HRegion region; + private Region region; private BloomType bloomType; private FileSystem fs; private Configuration conf; @@ -209,7 +209,7 @@ public class TestScanWithBloomError { p.add(kv); } region.put(p); - region.flushcache(); + region.flush(true); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java index 72f556e0813..84cb355f81d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScanner.java @@ -43,8 +43,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.hadoop.hbase.client.Delete; @@ -56,6 +54,8 @@ import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.InclusiveStopFilter; import org.apache.hadoop.hbase.filter.PrefixFilter; import org.apache.hadoop.hbase.filter.WhileMatchFilter; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Rule; import org.junit.Test; @@ -99,7 +99,7 @@ public class TestScanner { private static final long START_CODE = Long.MAX_VALUE; - private HRegion r; + private Region r; private HRegionIncommon region; private byte[] firstRowBytes, secondRowBytes, thirdRowBytes; @@ -271,7 +271,7 @@ public class TestScanner { // Close and re-open - r.close(); + ((HRegion)r).close(); r = HRegion.openHRegion(r, null); region = new HRegionIncommon(r); @@ -309,7 +309,7 @@ public class TestScanner { // Close and reopen - r.close(); + ((HRegion)r).close(); r = HRegion.openHRegion(r,null); region = new HRegionIncommon(r); @@ -344,7 +344,7 @@ public class TestScanner { // Close and reopen - r.close(); + ((HRegion)r).close(); r = HRegion.openHRegion(r,null); region = new HRegionIncommon(r); @@ -525,17 +525,17 @@ public class TestScanner { /* delete column1 of firstRow */ dc.deleteColumns(fam1, col1); r.delete(dc); - r.flushcache(); + r.flush(true); HBaseTestCase.addContent(hri, Bytes.toString(fam1), Bytes.toString(col1), secondRowBytes, thirdRowBytes); HBaseTestCase.addContent(hri, Bytes.toString(fam2), Bytes.toString(col1), secondRowBytes, thirdRowBytes); - r.flushcache(); + r.flush(true); 
InternalScanner s = r.getScanner(new Scan()); // run a major compact, column1 of firstRow will be cleaned. - r.compactStores(true); + r.compact(true); List results = new ArrayList(); s.next(results); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java index ffc76f3c6af..1e09c406737 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSeekOptimizations.java @@ -40,12 +40,12 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.compress.Compression; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.After; import org.junit.Before; @@ -106,7 +106,7 @@ public class TestSeekOptimizations { private static final int[] MAX_VERSIONS_VALUES = new int[] { 1, 2 }; // Instance variables - private HRegion region; + private Region region; private Put put; private Delete del; private Random rand; @@ -433,7 +433,7 @@ public class TestSeekOptimizations { } } - region.flushcache(); + region.flush(true); } @After diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java index 9c9fa6f060b..4f371bdb32a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java @@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.regionserver; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyInt; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.doNothing; @@ -44,19 +43,19 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.io.hfile.LruBlockCache; import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.LruBlockCache; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import 
org.apache.hadoop.hbase.util.PairOfSameType; +import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.zookeeper.KeeperException; import org.junit.After; import org.junit.Before; @@ -67,7 +66,7 @@ import org.mockito.Mockito; import com.google.common.collect.ImmutableList; /** - * Test the {@link SplitTransaction} class against an HRegion (as opposed to + * Test the {@link SplitTransactionImpl} class against an HRegion (as opposed to * running cluster). */ @Category({RegionServerTests.class, SmallTests.class}) @@ -120,8 +119,8 @@ public class TestSplitTransaction { assertEquals(rowcount, parentRowCount); // Start transaction. - SplitTransaction st = prepareGOOD_SPLIT_ROW(); - SplitTransaction spiedUponSt = spy(st); + SplitTransactionImpl st = prepareGOOD_SPLIT_ROW(); + SplitTransactionImpl spiedUponSt = spy(st); Mockito .doThrow(new MockedFailedDaughterOpen()) .when(spiedUponSt) @@ -161,12 +160,13 @@ public class TestSplitTransaction { prepareGOOD_SPLIT_ROW(); } - private SplitTransaction prepareGOOD_SPLIT_ROW() { + private SplitTransactionImpl prepareGOOD_SPLIT_ROW() throws IOException { return prepareGOOD_SPLIT_ROW(this.parent); } - private SplitTransaction prepareGOOD_SPLIT_ROW(final HRegion parentRegion) { - SplitTransaction st = new SplitTransaction(parentRegion, GOOD_SPLIT_ROW); + private SplitTransactionImpl prepareGOOD_SPLIT_ROW(final HRegion parentRegion) + throws IOException { + SplitTransactionImpl st = new SplitTransactionImpl(parentRegion, GOOD_SPLIT_ROW); assertTrue(st.prepare()); return st; } @@ -181,7 +181,7 @@ public class TestSplitTransaction { when(storeMock.close()).thenReturn(ImmutableList.of()); this.parent.stores.put(Bytes.toBytes(""), storeMock); - SplitTransaction st = new SplitTransaction(this.parent, GOOD_SPLIT_ROW); + SplitTransactionImpl st = new SplitTransactionImpl(this.parent, GOOD_SPLIT_ROW); assertFalse("a region should not be splittable if it has instances of store file references", st.prepare()); @@ -192,19 +192,19 @@ public class TestSplitTransaction { */ @Test public void testPrepareWithBadSplitRow() throws IOException { // Pass start row as split key. - SplitTransaction st = new SplitTransaction(this.parent, STARTROW); + SplitTransactionImpl st = new SplitTransactionImpl(this.parent, STARTROW); assertFalse(st.prepare()); - st = new SplitTransaction(this.parent, HConstants.EMPTY_BYTE_ARRAY); + st = new SplitTransactionImpl(this.parent, HConstants.EMPTY_BYTE_ARRAY); assertFalse(st.prepare()); - st = new SplitTransaction(this.parent, new byte [] {'A', 'A', 'A'}); + st = new SplitTransactionImpl(this.parent, new byte [] {'A', 'A', 'A'}); assertFalse(st.prepare()); - st = new SplitTransaction(this.parent, ENDROW); + st = new SplitTransactionImpl(this.parent, ENDROW); assertFalse(st.prepare()); } @Test public void testPrepareWithClosedRegion() throws IOException { this.parent.close(); - SplitTransaction st = new SplitTransaction(this.parent, GOOD_SPLIT_ROW); + SplitTransactionImpl st = new SplitTransactionImpl(this.parent, GOOD_SPLIT_ROW); assertFalse(st.prepare()); } @@ -220,12 +220,12 @@ public class TestSplitTransaction { ((LruBlockCache) cacheConf.getBlockCache()).clearCache(); // Start transaction. - SplitTransaction st = prepareGOOD_SPLIT_ROW(); + SplitTransactionImpl st = prepareGOOD_SPLIT_ROW(); // Run the execute. Look at what it returns. 
Server mockServer = Mockito.mock(Server.class); when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration()); - PairOfSameType daughters = st.execute(mockServer, null); + PairOfSameType daughters = st.execute(mockServer, null); // Do some assertions about execution. assertTrue(this.fs.exists(this.parent.getRegionFileSystem().getSplitsDir())); // Assert the parent region is closed. @@ -235,13 +235,15 @@ public class TestSplitTransaction { // to be under the daughter region dirs. assertEquals(0, this.fs.listStatus(this.parent.getRegionFileSystem().getSplitsDir()).length); // Check daughters have correct key span. - assertTrue(Bytes.equals(this.parent.getStartKey(), daughters.getFirst().getStartKey())); - assertTrue(Bytes.equals(GOOD_SPLIT_ROW, daughters.getFirst().getEndKey())); - assertTrue(Bytes.equals(daughters.getSecond().getStartKey(), GOOD_SPLIT_ROW)); - assertTrue(Bytes.equals(this.parent.getEndKey(), daughters.getSecond().getEndKey())); + assertTrue(Bytes.equals(parent.getRegionInfo().getStartKey(), + daughters.getFirst().getRegionInfo().getStartKey())); + assertTrue(Bytes.equals(GOOD_SPLIT_ROW, daughters.getFirst().getRegionInfo().getEndKey())); + assertTrue(Bytes.equals(daughters.getSecond().getRegionInfo().getStartKey(), GOOD_SPLIT_ROW)); + assertTrue(Bytes.equals(parent.getRegionInfo().getEndKey(), + daughters.getSecond().getRegionInfo().getEndKey())); // Count rows. daughters are already open int daughtersRowCount = 0; - for (HRegion openRegion: daughters) { + for (Region openRegion: daughters) { try { int count = countRows(openRegion); assertTrue(count > 0 && count != rowcount); @@ -264,8 +266,8 @@ public class TestSplitTransaction { // Start transaction. HRegion spiedRegion = spy(this.parent); - SplitTransaction st = prepareGOOD_SPLIT_ROW(spiedRegion); - SplitTransaction spiedUponSt = spy(st); + SplitTransactionImpl st = prepareGOOD_SPLIT_ROW(spiedRegion); + SplitTransactionImpl spiedUponSt = spy(st); doThrow(new IOException("Failing split. Expected reference file count isn't equal.")) .when(spiedUponSt).assertReferenceFileCount(anyInt(), eq(new Path(this.parent.getRegionFileSystem().getTableDir(), @@ -292,8 +294,8 @@ public class TestSplitTransaction { // Start transaction. HRegion spiedRegion = spy(this.parent); - SplitTransaction st = prepareGOOD_SPLIT_ROW(spiedRegion); - SplitTransaction spiedUponSt = spy(st); + SplitTransactionImpl st = prepareGOOD_SPLIT_ROW(spiedRegion); + SplitTransactionImpl spiedUponSt = spy(st); doNothing().when(spiedUponSt).assertReferenceFileCount(anyInt(), eq(parent.getRegionFileSystem().getSplitsDir(st.getFirstDaughter()))); when(spiedRegion.createDaughterRegionFromSplits(spiedUponSt.getSecondDaughter())). @@ -322,10 +324,10 @@ public class TestSplitTransaction { // Now retry the split but do not throw an exception this time. assertTrue(st.prepare()); - PairOfSameType daughters = st.execute(mockServer, null); + PairOfSameType daughters = st.execute(mockServer, null); // Count rows. 
daughters are already open int daughtersRowCount = 0; - for (HRegion openRegion: daughters) { + for (Region openRegion: daughters) { try { int count = countRows(openRegion); assertTrue(count > 0 && count != rowcount); @@ -351,7 +353,7 @@ public class TestSplitTransaction { private class MockedFailedDaughterCreation extends IOException {} private class MockedFailedDaughterOpen extends IOException {} - private int countRows(final HRegion r) throws IOException { + private int countRows(final Region r) throws IOException { int rowcount = 0; InternalScanner scanner = r.getScanner(new Scan()); try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index 06f9eb8c4d1..66f8cbc0945 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -57,6 +57,8 @@ import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Consistency; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; @@ -109,7 +111,7 @@ import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; /** - * Like TestSplitTransaction in that we're testing {@link SplitTransaction} + * Like TestSplitTransaction in that we're testing {@link SplitTransactionImpl} * only the below tests are against a running cluster where TestSplitTransaction * is tests against a bare {@link HRegion}. 
*/ @@ -199,7 +201,7 @@ public class TestSplitTransactionOnCluster { Coprocessor.PRIORITY_USER, region.getBaseConf()); // split async - this.admin.split(region.getRegionName(), new byte[] {42}); + this.admin.split(region.getRegionInfo().getRegionName(), new byte[] {42}); // we have to wait until the SPLITTING state is seen by the master FailingSplitRegionObserver observer = (FailingSplitRegionObserver) region @@ -238,7 +240,7 @@ public class TestSplitTransactionOnCluster { HRegion region = cluster.getRegions(tableName).get(0); Store store = region.getStore(cf); - int regionServerIndex = cluster.getServerWith(region.getRegionName()); + int regionServerIndex = cluster.getServerWith(region.getRegionInfo().getRegionName()); HRegionServer regionServer = cluster.getRegionServer(regionServerIndex); Table t = TESTING_UTIL.getConnection().getTable(tableName); @@ -262,7 +264,7 @@ public class TestSplitTransactionOnCluster { assertTrue(fileNum > store.getStorefiles().size()); // 3, Split - SplitTransaction st = new SplitTransaction(region, Bytes.toBytes("row3")); + SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row3")); assertTrue(st.prepare()); st.execute(regionServer, regionServer); assertEquals(2, cluster.getRegions(tableName).size()); @@ -472,7 +474,6 @@ public class TestSplitTransactionOnCluster { @Test(timeout = 180000) public void testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles() throws Exception { - Configuration conf = TESTING_UTIL.getConfiguration(); TableName userTableName = TableName.valueOf("testSplitShouldNotThrowNPEEvenARegionHasEmptySplitFiles"); HTableDescriptor htd = new HTableDescriptor(userTableName); @@ -634,7 +635,8 @@ public class TestSplitTransactionOnCluster { List regions = null; try { regions = cluster.getRegions(tableName); - int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName()); + int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionInfo() + .getRegionName()); HRegionServer regionServer = cluster.getRegionServer(regionServerIndex); insertData(tableName, admin, t); // Turn off balancer so it doesn't cut in and mess up our placements. @@ -646,7 +648,7 @@ public class TestSplitTransactionOnCluster { assertEquals("The specified table should present.", true, tableExists); final HRegion region = findSplittableRegion(regions); assertTrue("not able to find a splittable region", region != null); - SplitTransaction st = new SplitTransaction(region, Bytes.toBytes("row2")); + SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row2")); try { st.prepare(); st.createDaughters(regionServer, regionServer); @@ -689,7 +691,8 @@ public class TestSplitTransactionOnCluster { } while (oldRegions.size() != 2); for (HRegion h : oldRegions) LOG.debug("OLDREGION " + h.getRegionInfo()); try { - int regionServerIndex = cluster.getServerWith(oldRegions.get(0).getRegionName()); + int regionServerIndex = cluster.getServerWith(oldRegions.get(0).getRegionInfo() + .getRegionName()); HRegionServer regionServer = cluster.getRegionServer(regionServerIndex); insertData(tableName, admin, t); // Turn off balancer so it doesn't cut in and mess up our placements. 
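A minimal sketch (outside the patch, for readability) of the manual-split pattern these TestSplitTransactionOnCluster hunks converge on; it assumes the usual mini-cluster test fixtures (cluster, tableName, assertions statically imported) and uses only calls that appear in the hunks above:

    // Locate the region server hosting the test table's first region.
    // Note the accessor change carried through this patch:
    // region.getRegionInfo().getRegionName() instead of region.getRegionName().
    HRegion parent = cluster.getRegions(tableName).get(0);
    int serverIndex = cluster.getServerWith(parent.getRegionInfo().getRegionName());
    HRegionServer regionServer = cluster.getRegionServer(serverIndex);

    // Drive the split by hand: prepare() validates the split point,
    // execute() returns a PairOfSameType<Region> of opened daughters.
    SplitTransactionImpl st = new SplitTransactionImpl(parent, Bytes.toBytes("row2"));
    assertTrue(st.prepare());
    st.execute(regionServer, regionServer);
    assertEquals(2, cluster.getRegions(tableName).size());

The only behavioral difference from the pre-patch tests is the concrete type: SplitTransactionImpl replaces the old SplitTransaction class, and the result pair is typed against the Region interface rather than HRegion.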
@@ -700,10 +703,10 @@ public class TestSplitTransactionOnCluster { tableName); assertEquals("The specified table should be present.", true, tableExists); final HRegion region = findSplittableRegion(oldRegions); - regionServerIndex = cluster.getServerWith(region.getRegionName()); + regionServerIndex = cluster.getServerWith(region.getRegionInfo().getRegionName()); regionServer = cluster.getRegionServer(regionServerIndex); assertTrue("not able to find a splittable region", region != null); - SplitTransaction st = new SplitTransaction(region, Bytes.toBytes("row2")); + SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row2")); try { st.prepare(); st.execute(regionServer, regionServer); @@ -779,7 +782,8 @@ public class TestSplitTransactionOnCluster { List regions = cluster.getRegions(tableName); HRegionInfo hri = getAndCheckSingleTableRegion(regions); ensureTableRegionNotOnSameServerAsMeta(admin, hri); - int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName()); + int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionInfo() + .getRegionName()); HRegionServer regionServer = cluster.getRegionServer(regionServerIndex); // Turn off balancer so it doesn't cut in and mess up our placements. this.admin.setBalancerRunning(false, true); @@ -802,7 +806,7 @@ public class TestSplitTransactionOnCluster { assertTrue("not able to find a splittable region", region != null); // Now split. - SplitTransaction st = new MockedSplitTransaction(region, Bytes.toBytes("row2")); + SplitTransactionImpl st = new MockedSplitTransaction(region, Bytes.toBytes("row2")); try { st.prepare(); st.execute(regionServer, regionServer); @@ -862,7 +866,8 @@ public class TestSplitTransactionOnCluster { List regions = cluster.getRegions(tableName); HRegionInfo hri = getAndCheckSingleTableRegion(regions); ensureTableRegionNotOnSameServerAsMeta(admin, hri); - int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName()); + int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionInfo() + .getRegionName()); HRegionServer regionServer = cluster.getRegionServer(regionServerIndex); // Turn off balancer so it doesn't cut in and mess up our placements. this.admin.setBalancerRunning(false, true); @@ -876,7 +881,7 @@ public class TestSplitTransactionOnCluster { assertTrue("not able to find a splittable region", region != null); // Now split. 
- SplitTransaction st = new SplitTransaction(region, Bytes.toBytes("row2")); + SplitTransactionImpl st = new SplitTransactionImpl(region, Bytes.toBytes("row2")); try { st.prepare(); st.execute(regionServer, regionServer); @@ -919,8 +924,8 @@ public class TestSplitTransactionOnCluster { if (firstTableRegions.size() == 0 || secondTableRegions.size() == 0) { fail("Each table should have at least one region."); } - ServerName serverName = - cluster.getServerHoldingRegion(firstTable, firstTableRegions.get(0).getRegionName()); + ServerName serverName = cluster.getServerHoldingRegion(firstTable, + firstTableRegions.get(0).getRegionInfo().getRegionName()); admin.move(secondTableRegions.get(0).getRegionInfo().getEncodedNameAsBytes(), Bytes.toBytes(serverName.getServerName())); Table table1 = null; @@ -958,7 +963,8 @@ public class TestSplitTransactionOnCluster { HTableDescriptor desc = new HTableDescriptor(table); desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f"))); admin.createTable(desc); - HTable hTable = new HTable(cluster.getConfiguration(), desc.getTableName()); + Connection connection = ConnectionFactory.createConnection(cluster.getConfiguration()); + HTable hTable = (HTable) connection.getTable(desc.getTableName()); for(int i = 1; i < 5; i++) { Put p1 = new Put(("r"+i).getBytes()); p1.add(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes()); @@ -966,10 +972,10 @@ public class TestSplitTransactionOnCluster { } admin.flush(desc.getTableName()); List regions = cluster.getRegions(desc.getTableName()); - int serverWith = cluster.getServerWith(regions.get(0).getRegionName()); + int serverWith = cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName()); HRegionServer regionServer = cluster.getRegionServer(serverWith); - cluster.getServerWith(regions.get(0).getRegionName()); - SplitTransaction st = new SplitTransaction(regions.get(0), Bytes.toBytes("r3")); + cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName()); + SplitTransactionImpl st = new SplitTransactionImpl(regions.get(0), Bytes.toBytes("r3")); st.prepare(); st.stepsBeforePONR(regionServer, regionServer, false); Path tableDir = @@ -1007,13 +1013,14 @@ public class TestSplitTransactionOnCluster { List regions = awaitTableRegions(tableName); assertTrue("Table not online", cluster.getRegions(tableName).size() != 0); - int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionName()); + int regionServerIndex = cluster.getServerWith(regions.get(0).getRegionInfo() + .getRegionName()); HRegionServer regionServer = cluster.getRegionServer(regionServerIndex); final HRegion region = findSplittableRegion(regions); assertTrue("not able to find a splittable region", region != null); - SplitTransaction st = new MockedSplitTransaction(region, Bytes.toBytes("row2")) { + SplitTransactionImpl st = new MockedSplitTransaction(region, Bytes.toBytes("row2")) { @Override - public PairOfSameType stepsBeforePONR(final Server server, + public PairOfSameType stepsBeforePONR(final Server server, final RegionServerServices services, boolean testing) throws IOException { throw new SplittingNodeCreationFailedException (); } @@ -1051,7 +1058,7 @@ public class TestSplitTransactionOnCluster { p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value"+i)); region.put(p); } - region.flushcache(); + region.flush(true); Store store = region.getStore(Bytes.toBytes("f")); Collection storefiles = store.getStorefiles(); assertEquals(storefiles.size(), 1); @@ -1072,7 +1079,7 @@ public class TestSplitTransactionOnCluster { } } - 
public static class MockedSplitTransaction extends SplitTransaction { + public static class MockedSplitTransaction extends SplitTransactionImpl { private HRegion currentRegion; public MockedSplitTransaction(HRegion region, byte[] splitrow) { @@ -1310,31 +1317,31 @@ public class TestSplitTransactionOnCluster { } public static class MockedRegionObserver extends BaseRegionObserver { - private SplitTransaction st = null; - private PairOfSameType daughterRegions = null; + private SplitTransactionImpl st = null; + private PairOfSameType daughterRegions = null; @Override public void preSplitBeforePONR(ObserverContext ctx, byte[] splitKey, List metaEntries) throws IOException { RegionCoprocessorEnvironment environment = ctx.getEnvironment(); HRegionServer rs = (HRegionServer) environment.getRegionServerServices(); - List onlineRegions = + List onlineRegions = rs.getOnlineRegions(TableName.valueOf("testSplitHooksBeforeAndAfterPONR_2")); - HRegion region = onlineRegions.get(0); - for (HRegion r : onlineRegions) { + Region region = onlineRegions.get(0); + for (Region r : onlineRegions) { if (r.getRegionInfo().containsRow(splitKey)) { region = r; break; } } - st = new SplitTransaction(region, splitKey); + st = new SplitTransactionImpl((HRegion) region, splitKey); if (!st.prepare()) { LOG.error("Prepare for the table " + region.getTableDesc().getNameAsString() + " failed. So returning null. "); ctx.bypass(); return; } - region.forceSplit(splitKey); + ((HRegion)region).forceSplit(splitKey); daughterRegions = st.stepsBeforePONR(rs, rs, false); HRegionInfo copyOfParent = new HRegionInfo(region.getRegionInfo()); copyOfParent.setOffline(true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java index da39f5955b9..349ec1cfbdc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java @@ -18,7 +18,11 @@ */ package org.apache.hadoop.hbase.regionserver; -import org.apache.hadoop.hbase.HBaseTestCase; +import static org.junit.Assert.*; + +import java.io.IOException; + +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -30,7 +34,7 @@ import org.junit.experimental.categories.Category; * Test HStoreFile */ @Category({RegionServerTests.class, SmallTests.class}) -public class TestStoreFileInfo extends HBaseTestCase { +public class TestStoreFileInfo { private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); /** @@ -59,5 +63,25 @@ public class TestStoreFileInfo extends HBaseTestCase { assertFalse("should not be a valid link: " + name, HFileLink.isHFileLink(name)); } } + + @Test + public void testEqualsWithLink() throws IOException { + Path origin = new Path("/origin"); + Path tmp = new Path("/tmp"); + Path mob = new Path("/mob"); + Path archive = new Path("/archive"); + HFileLink link1 = new HFileLink(new Path(origin, "f1"), new Path(tmp, "f1"), + new Path(mob, "f1"), new Path(archive, "f1")); + HFileLink link2 = new HFileLink(new Path(origin, "f1"), new Path(tmp, "f1"), + new Path(mob, "f1"), new Path(archive, "f1")); + + StoreFileInfo info1 = new StoreFileInfo(TEST_UTIL.getConfiguration(), + TEST_UTIL.getTestFileSystem(), null, link1); + StoreFileInfo info2 = new 
StoreFileInfo(TEST_UTIL.getConfiguration(), + TEST_UTIL.getTestFileSystem(), null, link2); + + assertEquals(info1, info2); + assertEquals(info1.hashCode(), info2.hashCode()); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java index 2da1f7e7b7c..ed0ac25f16f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileRefresherChore.java @@ -94,13 +94,15 @@ public class TestStoreFileRefresherChore { } } - private HRegion initHRegion(HTableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId) throws IOException { + private Region initHRegion(HTableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId) + throws IOException { Configuration conf = TEST_UTIL.getConfiguration(); Path tableDir = FSUtils.getTableDir(testDir, htd.getTableName()); HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false, 0, replicaId); - HRegionFileSystem fs = new FailingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir, info); + HRegionFileSystem fs = new FailingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir, + info); final Configuration walConf = new Configuration(conf); FSUtils.setRootDir(walConf, tableDir); final WALFactory wals = new WALFactory(walConf, null, "log_" + replicaId); @@ -111,7 +113,8 @@ public class TestStoreFileRefresherChore { return region; } - private void putData(HRegion region, int startRow, int numRows, byte[] qf, byte[]... families) throws IOException { + private void putData(Region region, int startRow, int numRows, byte[] qf, byte[]... families) + throws IOException { for (int i = startRow; i < startRow + numRows; i++) { Put put = new Put(Bytes.toBytes("" + i)); put.setDurability(Durability.SKIP_WAL); @@ -122,7 +125,7 @@ public class TestStoreFileRefresherChore { } } - private void verifyData(HRegion newReg, int startRow, int numRows, byte[] qf, byte[]... families) + private void verifyData(Region newReg, int startRow, int numRows, byte[] qf, byte[]... 
families) throws IOException { for (int i = startRow; i < startRow + numRows; i++) { byte[] row = Bytes.toBytes("" + i); @@ -160,13 +163,13 @@ public class TestStoreFileRefresherChore { byte[] qf = Bytes.toBytes("cq"); HRegionServer regionServer = mock(HRegionServer.class); - List regions = new ArrayList(); + List regions = new ArrayList(); when(regionServer.getOnlineRegionsLocalContext()).thenReturn(regions); when(regionServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration()); HTableDescriptor htd = getTableDesc(TableName.valueOf("testIsStale"), families); - HRegion primary = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0); - HRegion replica1 = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 1); + Region primary = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0); + Region replica1 = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 1); regions.add(primary); regions.add(replica1); @@ -174,7 +177,7 @@ public class TestStoreFileRefresherChore { // write some data to primary and flush putData(primary, 0, 100, qf, families); - primary.flushcache(); + primary.flush(true); verifyData(primary, 0, 100, qf, families); try { @@ -187,11 +190,11 @@ public class TestStoreFileRefresherChore { verifyData(replica1, 0, 100, qf, families); // simulate an fs failure where we cannot refresh the store files for the replica - ((FailingHRegionFileSystem)replica1.getRegionFileSystem()).fail = true; + ((FailingHRegionFileSystem)((HRegion)replica1).getRegionFileSystem()).fail = true; // write some more data to primary and flush putData(primary, 100, 100, qf, families); - primary.flushcache(); + primary.flush(true); verifyData(primary, 0, 200, qf, families); chore.chore(); // should not throw ex, but we cannot refresh the store files diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java index bf9fed69f0c..ee392123497 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreScanner.java @@ -35,9 +35,9 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueTestUtil; +import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeCompactor.java index 86d670c3a5d..b7435504af3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeCompactor.java @@ -17,11 +17,20 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.*; -import static org.mockito.Mockito.*; -import static org.apache.hadoop.hbase.regionserver.StripeStoreFileManager.STRIPE_START_KEY; -import static 
org.apache.hadoop.hbase.regionserver.StripeStoreFileManager.STRIPE_END_KEY; import static org.apache.hadoop.hbase.regionserver.StripeStoreFileManager.OPEN_KEY; +import static org.apache.hadoop.hbase.regionserver.StripeStoreFileManager.STRIPE_END_KEY; +import static org.apache.hadoop.hbase.regionserver.StripeStoreFileManager.STRIPE_START_KEY; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.io.IOException; import java.util.ArrayList; @@ -38,14 +47,14 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KVComparator; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.compactions.NoLimitCompactionThroughputController; import org.apache.hadoop.hbase.regionserver.compactions.StripeCompactor; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -233,10 +242,13 @@ public class TestStripeCompactor { results.add(kvs.remove(0)); return !kvs.isEmpty(); } + @Override - public boolean next(List result, int limit) throws IOException { + public boolean next(List result, ScannerContext scannerContext) + throws IOException { return next(result); } + @Override public void close() throws IOException {} } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java index ba05e9a2ef7..fb4561b5df9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWideScanner.java @@ -33,12 +33,12 @@ import org.apache.hadoop.hbase.HBaseTestCase; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Test; import org.junit.experimental.categories.Category; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactionWithThroughputController.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactionWithThroughputController.java index 8d0d5a845d1..b2f2898d360 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactionWithThroughputController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactionWithThroughputController.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.HStore; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreEngine; import org.apache.hadoop.hbase.regionserver.StripeStoreConfig; @@ -73,8 +74,8 @@ public class TestCompactionWithThroughputController { List rsts = cluster.getRegionServerThreads(); for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) { HRegionServer hrs = rsts.get(i).getRegionServer(); - for (HRegion region : hrs.getOnlineRegions(tableName)) { - return region.getStores().values().iterator().next(); + for (Region region : hrs.getOnlineRegions(tableName)) { + return region.getStores().iterator().next(); } } return null; @@ -122,8 +123,6 @@ public class TestCompactionWithThroughputController { assertEquals(10, store.getStorefilesCount()); long startTime = System.currentTimeMillis(); TEST_UTIL.getHBaseAdmin().majorCompact(tableName); - Thread.sleep(5000); - assertEquals(10, store.getStorefilesCount()); while (store.getStorefilesCount() != 1) { Thread.sleep(20); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java index f3b7be47675..81f811528ad 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java @@ -49,13 +49,12 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.regionserver.ScanType; +import org.apache.hadoop.hbase.regionserver.ScannerContext; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreConfigInformation; import org.apache.hadoop.hbase.regionserver.StoreFile; @@ -66,6 +65,8 @@ import org.apache.hadoop.hbase.regionserver.StripeStoreFileManager; import org.apache.hadoop.hbase.regionserver.StripeStoreFlusher; import org.apache.hadoop.hbase.regionserver.TestStripeCompactor.StoreFileWritersCapture; import org.apache.hadoop.hbase.regionserver.compactions.StripeCompactionPolicy.StripeInformationProvider; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; import 
org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ConcatenatedLists; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -777,11 +778,13 @@ public class TestStripeCompactionPolicy { public boolean next(List results) throws IOException { if (kvs.isEmpty()) return false; results.add(kvs.remove(0)); + return !kvs.isEmpty(); } @Override - public boolean next(List result, int limit) throws IOException { + public boolean next(List result, ScannerContext scannerContext) + throws IOException { return next(result); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java index 10e7e3d66f0..94aa1069a09 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestDurability.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.regionserver.wal; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; import java.io.IOException; @@ -192,6 +193,28 @@ public class TestDurability { assertEquals(3, Bytes.toLong(res.getValue(FAMILY, col3))); verifyWALCount(wals, wal, 2); } + + /* + * Test that when returnResults is set to false on an increment, it should not return the + * result and instead return null. + */ + @Test + public void testIncrementWithReturnResultsSetToFalse() throws Exception { + byte[] row1 = Bytes.toBytes("row1"); + byte[] col1 = Bytes.toBytes("col1"); + + // Setting up region + final WALFactory wals = new WALFactory(CONF, null, "testIncrementWithReturnResultsSetToFalse"); + byte[] tableName = Bytes.toBytes("testIncrementWithReturnResultsSetToFalse"); + final WAL wal = wals.getWAL(tableName); + HRegion region = createHRegion(tableName, "increment", wal, Durability.USE_DEFAULT); + + Increment inc1 = new Increment(row1); + inc1.setReturnResults(false); + inc1.addColumn(FAMILY, col1, 1); + Result res = region.increment(inc1); + assertNull(res); + } private Put newPut(Durability durability) { Put p = new Put(ROW); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java index 2c25e3b1c9b..77071ce1343 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestFSHLog.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; @@ -451,7 +452,7 @@ public class TestFSHLog { System.currentTimeMillis(), clusterIds, -1, -1); wal.append(htd, info, logkey, edits, region.getSequenceId(), true, null); } - region.flushcache(); + region.flush(true); // FlushResult.flushSequenceId is not visible here so go get the current sequence id.
long currentSequenceId = region.getSequenceId().get(); // Now release the appends diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java index 111acf3a27f..2c75f234beb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java @@ -53,8 +53,8 @@ import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.fs.HFileSystem; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests; @@ -223,11 +223,8 @@ public class TestLogRolling { " log files"); // flush all regions - - List regions = - new ArrayList(server.getOnlineRegionsLocalContext()); - for (HRegion r: regions) { - r.flushcache(); + for (Region r: server.getOnlineRegionsLocalContext()) { + r.flush(true); } // Now roll the log @@ -530,9 +527,8 @@ public class TestLogRolling { assertTrue(loggedRows.contains("row1005")); // flush all regions - List regions = new ArrayList(server.getOnlineRegionsLocalContext()); - for (HRegion r: regions) { - r.flushcache(); + for (Region r: server.getOnlineRegionsLocalContext()) { + r.flush(true); } ResultScanner scanner = table.getScanner(new Scan()); @@ -574,7 +570,7 @@ public class TestLogRolling { server = TEST_UTIL.getRSForFirstRegionInTable(table.getName()); final WAL log = server.getWAL(null); - HRegion region = server.getOnlineRegions(table2.getName()).get(0); + Region region = server.getOnlineRegions(table2.getName()).get(0); Store s = region.getStore(HConstants.CATALOG_FAMILY); //have to flush namespace to ensure it doesn't affect wall tests @@ -595,7 +591,7 @@ public class TestLogRolling { assertEquals("Should have WAL; one table is not flushed", 1, DefaultWALProvider.getNumRolledLogFiles(log)); admin.flush(table2.getName()); - region.compactStores(); + region.compact(false); // Wait for compaction in case if flush triggered it before us. 
Assert.assertNotNull(s); for (int waitTime = 3000; s.getStorefilesCount() > 1 && waitTime > 0; waitTime -= 200) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java index bb634d1b07b..5310a2e6b47 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java @@ -72,6 +72,7 @@ import org.apache.hadoop.hbase.regionserver.FlushRequester; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.MemStoreSnapshot; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionScanner; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.Store; @@ -86,17 +87,17 @@ import org.apache.hadoop.hbase.util.HFileTestUtil; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.wal.DefaultWALProvider; import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.wal.WALSplitter; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; -import org.junit.Test; import org.junit.Rule; -import org.junit.rules.TestName; +import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; import org.mockito.Mockito; /** @@ -202,9 +203,9 @@ public class TestWALReplay { assertEquals(1, regions.size()); // move region to another regionserver - HRegion destRegion = regions.get(0); + Region destRegion = regions.get(0); int originServerNum = hbaseCluster - .getServerWith(destRegion.getRegionName()); + .getServerWith(destRegion.getRegionInfo().getRegionName()); assertTrue("Please start more than 1 regionserver", hbaseCluster .getRegionServerThreads().size() > 1); int destServerNum = 0; @@ -228,13 +229,13 @@ public class TestWALReplay { assertEquals(0, count); // flush region and make major compaction - destServer.getOnlineRegion(destRegion.getRegionName()).flushcache(); + Region region = destServer.getOnlineRegion(destRegion.getRegionInfo().getRegionName()); + region.flush(true); // wait to complete major compaction - for (Store store : destServer.getOnlineRegion(destRegion.getRegionName()) - .getStores().values()) { + for (Store store : region.getStores()) { store.triggerMajorCompaction(); } - destServer.getOnlineRegion(destRegion.getRegionName()).compactStores(); + region.compact(true); // move region to origin regionserver moveRegionAndWait(destRegion, originServer); @@ -250,7 +251,7 @@ public class TestWALReplay { resultScanner.close(); } - private void moveRegionAndWait(HRegion destRegion, HRegionServer destServer) + private void moveRegionAndWait(Region destRegion, HRegionServer destServer) throws InterruptedException, MasterNotRunningException, ZooKeeperConnectionException, IOException { HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster(); @@ -285,7 +286,7 @@ public class TestWALReplay { deleteDir(basedir); HTableDescriptor htd = createBasic3FamilyHTD(tableName); - HRegion region2 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd); + Region region2 = 
HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd); HBaseTestingUtility.closeRegionAndWAL(region2); final byte [] rowName = tableName.getName(); @@ -346,10 +347,10 @@ public class TestWALReplay { final Path basedir = new Path(this.hbaseRootDir, tableName.getNameAsString()); deleteDir(basedir); final HTableDescriptor htd = createBasic3FamilyHTD(tableName); - HRegion region2 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd); + Region region2 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd); HBaseTestingUtility.closeRegionAndWAL(region2); WAL wal = createWAL(this.conf); - HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf); + Region region = HRegion.openHRegion(hri, htd, wal, this.conf); byte [] family = htd.getFamilies().iterator().next().getName(); Path f = new Path(basedir, "hfile"); @@ -357,7 +358,7 @@ public class TestWALReplay { Bytes.toBytes("z"), 10); List > hfs= new ArrayList>(1); hfs.add(Pair.newPair(family, f.toString())); - region.bulkLoadHFiles(hfs, true); + region.bulkLoadHFiles(hfs, true, null); // Add an edit so something in the WAL byte [] row = tableName.getName(); @@ -430,12 +431,12 @@ public class TestWALReplay { Bytes.toBytes(i + "50"), 10); hfs.add(Pair.newPair(family, f.toString())); } - region.bulkLoadHFiles(hfs, true); + region.bulkLoadHFiles(hfs, true, null); final int rowsInsertedCount = 31; assertEquals(rowsInsertedCount, getScannedCount(region.getScanner(new Scan()))); // major compact to turn all the bulk loaded files into one normal file - region.compactStores(true); + region.compact(true); assertEquals(rowsInsertedCount, getScannedCount(region.getScanner(new Scan()))); // Now 'crash' the region by stealing its wal @@ -497,7 +498,7 @@ public class TestWALReplay { addRegionEdits(rowName, hcd.getName(), countPerFamily, this.ee, region, "x"); if (first ) { // If first, so we have at least one family w/ different seqid to rest. 
- region.flushcache(); + region.flush(true); first = false; } } @@ -612,7 +613,7 @@ public class TestWALReplay { result.size()); // Let us flush the region - region.flushcache(); + region.flush(true); region.close(true); wal.shutdown(); @@ -706,7 +707,7 @@ public class TestWALReplay { // Let us flush the region CustomStoreFlusher.throwExceptionWhenFlushing.set(true); try { - region.flushcache(); + region.flush(true); fail("Injected exception hasn't been thrown"); } catch (Throwable t) { LOG.info("Expected simulated exception when flushing region," @@ -726,7 +727,7 @@ public class TestWALReplay { // call flush again CustomStoreFlusher.throwExceptionWhenFlushing.set(false); try { - region.flushcache(); + region.flush(true); } catch (IOException t) { LOG.info("Expected exception when flushing region because server is stopped," + t.getMessage()); @@ -831,11 +832,12 @@ public class TestWALReplay { new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) { @Override protected FlushResult internalFlushcache(final WAL wal, final long myseqid, - Collection storesToFlush, MonitoredTask status) - throws IOException { + final Collection storesToFlush, MonitoredTask status, + boolean writeFlushWalMarker) + throws IOException { LOG.info("InternalFlushCache Invoked"); FlushResult fs = super.internalFlushcache(wal, myseqid, storesToFlush, - Mockito.mock(MonitoredTask.class)); + Mockito.mock(MonitoredTask.class), writeFlushWalMarker); flushcount.incrementAndGet(); return fs; }; @@ -881,7 +883,7 @@ public class TestWALReplay { // Let us flush the region // But this time completeflushcache is not yet done - region.flushcache(); + region.flush(true); for (HColumnDescriptor hcd : htd.getFamilies()) { addRegionEdits(rowName, hcd.getName(), 5, this.ee, region, "x"); } @@ -956,16 +958,16 @@ public class TestWALReplay { private HRegion r; @Override - public void requestFlush(HRegion region, boolean forceFlushAllStores) { + public void requestFlush(Region region, boolean force) { try { - r.flushcache(forceFlushAllStores); + r.flush(force); } catch (IOException e) { throw new RuntimeException("Exception flushing", e); } } @Override - public void requestDelayedFlush(HRegion region, long when, boolean forceFlushAllStores) { + public void requestDelayedFlush(Region region, long when, boolean forceFlushAllStores) { // TODO Auto-generated method stub } @@ -1004,7 +1006,7 @@ public class TestWALReplay { } static List addRegionEdits (final byte [] rowName, final byte [] family, - final int count, EnvironmentEdge ee, final HRegion r, + final int count, EnvironmentEdge ee, final Region r, final String qualifierPrefix) throws IOException { List puts = new ArrayList(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java index a501af9c130..0ec410e2838 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java @@ -37,11 +37,11 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; 
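Illustrative sketch (not part of the patch): the WAL and log-rolling test hunks above apply the same interface migration to flushes and compactions — region.flushcache() becomes region.flush(true), region.compactStores(...) becomes region.compact(...), and stores are reached through Region.getStores(). The helper class and method below are invented; the calls themselves are the ones used in the hunks above.

import java.io.IOException;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Store;

class FlushAndCompactSketch {
  // Flush the region, request a major compaction on each store, then compact,
  // written against the Region interface rather than HRegion.
  static void flushAndMajorCompact(Region region) throws IOException {
    region.flush(true);                 // was region.flushcache()
    for (Store store : region.getStores()) {
      store.triggerMajorCompaction();   // mark each store for major compaction
    }
    region.compact(true);               // was region.compactStores(true)
  }
}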
-import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; @@ -350,7 +350,7 @@ public class TestMasterReplication { int numClusters = utilities.length; Table[] htables = new Table[numClusters]; for (int i = 0; i < numClusters; i++) { - Table htable = new HTable(configurations[i], tableName); + Table htable = ConnectionFactory.createConnection(configurations[i]).getTable(tableName); htable.setWriteBufferSize(1024); htables[i] = htable; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java index b30820b385e..b670e659ddc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java @@ -225,7 +225,7 @@ public class TestMultiSlaveReplication { // request a roll admin.rollWALWriter(cluster.getServerHoldingRegion(region.getTableDesc().getTableName(), - region.getRegionName())); + region.getRegionInfo().getRegionName())); // wait try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java index f0db865bad2..2dc3c896559 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java @@ -31,11 +31,15 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.ServerLoad; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; @@ -556,4 +560,45 @@ public class TestReplicationSmallTests extends TestReplicationBase { hadmin.close(); } + /** + * Test for HBASE-9531 + * put a few rows into htable1, which should be replicated to htable2 + * create a ClusterStatus instance 'status' from HBaseAdmin + * test : status.getLoad(server).getReplicationLoadSourceList() + * test : status.getLoad(server).getReplicationLoadSink() + * * @throws Exception + */ + @Test(timeout = 300000) + public void testReplicationStatus() throws Exception { + LOG.info("testReplicationStatus"); + + try (Admin admin = utility1.getConnection().getAdmin()) { + + final byte[] qualName = Bytes.toBytes("q"); + Put p; + + for (int i = 0; i < NB_ROWS_IN_BATCH; i++) { + p = new Put(Bytes.toBytes("row" + i)); + p.add(famName, qualName, Bytes.toBytes("val" + i)); + htable1.put(p); + } + + ClusterStatus status = admin.getClusterStatus(); + + for (ServerName server : status.getServers()) { + ServerLoad sl = status.getLoad(server); + List rLoadSourceList = sl.getReplicationLoadSourceList(); + ReplicationLoadSink rLoadSink = sl.getReplicationLoadSink(); + + // check SourceList 
has at least one entry + assertTrue("failed to get ReplicationLoadSourceList", (rLoadSourceList.size() > 0)); + + // check Sink exist only as it is difficult to verify the value on the fly + assertTrue("failed to get ReplicationLoadSink.AgeOfLastShippedOp ", + (rLoadSink.getAgeOfLastAppliedOp() >= 0)); + assertTrue("failed to get ReplicationLoadSink.TimeStampsOfLastAppliedOp ", + (rLoadSink.getTimeStampsOfLastAppliedOp() >= 0)); + } + } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java index 7ca12f03761..2231f0ea031 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java @@ -39,14 +39,12 @@ import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; -import org.apache.hadoop.hbase.client.RpcRetryingCaller; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.RpcRetryingCallerImpl; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; @@ -59,7 +57,6 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.log4j.Level; import org.junit.AfterClass; import org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -84,7 +81,6 @@ public class TestRegionReplicaReplicationEndpoint { @BeforeClass public static void beforeClass() throws Exception { - /* Configuration conf = HTU.getConfiguration(); conf.setFloat("hbase.regionserver.logroll.multiplier", 0.0003f); conf.setInt("replication.source.size.capacity", 10240); @@ -98,20 +94,17 @@ public class TestRegionReplicaReplicationEndpoint { conf.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100); conf.setInt("replication.stats.thread.period.seconds", 5); conf.setBoolean("hbase.tests.use.shortcircuit.reads", false); - conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3); // less number of retries is needed + conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5); // less number of retries is needed conf.setInt("hbase.client.serverside.retries.multiplier", 1); - HTU.startMiniCluster(NB_SERVERS);*/ + HTU.startMiniCluster(NB_SERVERS); } @AfterClass public static void afterClass() throws Exception { - /* HTU.shutdownMiniCluster(); - */ } - @Ignore("To be fixed before 1.0") @Test public void testRegionReplicaReplicationPeerIsCreated() throws IOException, ReplicationException { // create a table with region replicas. 
Check whether the replication peer is created @@ -142,6 +135,38 @@ public class TestRegionReplicaReplicationEndpoint { admin.close(); } + @Test (timeout=240000) + public void testRegionReplicaReplicationPeerIsCreatedForModifyTable() throws Exception { + // modify a table by adding region replicas. Check whether the replication peer is created + // and replication started. + ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration()); + String peerId = "region_replica_replication"; + + if (admin.getPeerConfig(peerId) != null) { + admin.removePeer(peerId); + } + + HTableDescriptor htd + = HTU.createTableDescriptor("testRegionReplicaReplicationPeerIsCreatedForModifyTable"); + HTU.getHBaseAdmin().createTable(htd); + + // assert that replication peer is not created yet + ReplicationPeerConfig peerConfig = admin.getPeerConfig(peerId); + assertNull(peerConfig); + + HTU.getHBaseAdmin().disableTable(htd.getTableName()); + htd.setRegionReplication(2); + HTU.getHBaseAdmin().modifyTable(htd.getTableName(), htd); + HTU.getHBaseAdmin().enableTable(htd.getTableName()); + + // assert peer configuration is correct + peerConfig = admin.getPeerConfig(peerId); + assertNotNull(peerConfig); + assertEquals(peerConfig.getClusterKey(), ZKUtil.getZooKeeperClusterKey(HTU.getConfiguration())); + assertEquals(peerConfig.getReplicationEndpointImpl(), + RegionReplicaReplicationEndpoint.class.getName()); + admin.close(); + } public void testRegionReplicaReplication(int regionReplication) throws Exception { // test region replica replication. Create a table with single region, write some data @@ -167,7 +192,7 @@ public class TestRegionReplicaReplicationEndpoint { // load the data to the table HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000); - verifyReplication(tableName, regionReplication, 0, 6000); + verifyReplication(tableName, regionReplication, 0, 1000); } finally { table.close(); @@ -179,33 +204,37 @@ public class TestRegionReplicaReplicationEndpoint { private void verifyReplication(TableName tableName, int regionReplication, final int startRow, final int endRow) throws Exception { + verifyReplication(tableName, regionReplication, startRow, endRow, true); + } + + private void verifyReplication(TableName tableName, int regionReplication, + final int startRow, final int endRow, final boolean present) throws Exception { // find the regions - final HRegion[] regions = new HRegion[regionReplication]; + final Region[] regions = new Region[regionReplication]; for (int i=0; i < NB_SERVERS; i++) { HRegionServer rs = HTU.getMiniHBaseCluster().getRegionServer(i); - List onlineRegions = rs.getOnlineRegions(tableName); - for (HRegion region : onlineRegions) { + List onlineRegions = rs.getOnlineRegions(tableName); + for (Region region : onlineRegions) { regions[region.getRegionInfo().getReplicaId()] = region; } } - for (HRegion region : regions) { + for (Region region : regions) { assertNotNull(region); } for (int i = 1; i < regionReplication; i++) { - final HRegion region = regions[i]; + final Region region = regions[i]; // wait until all the data is replicated to all secondary regions - Waiter.waitFor(HTU.getConfiguration(), 60000, new Waiter.Predicate() { + Waiter.waitFor(HTU.getConfiguration(), 90000, new Waiter.Predicate() { @Override public boolean evaluate() throws Exception { LOG.info("verifying replication for region replica:" + region.getRegionInfo()); try { - HTU.verifyNumericRows(region, HBaseTestingUtility.fam1, startRow, endRow); + HTU.verifyNumericRows(region, HBaseTestingUtility.fam1, 
startRow, endRow, present); } catch(Throwable ex) { - LOG.warn("Verification from secondary region is not complete yet. Got:" + ex - + " " + ex.getMessage()); + LOG.warn("Verification from secondary region is not complete yet", ex); // still wait return false; } @@ -215,26 +244,54 @@ public class TestRegionReplicaReplicationEndpoint { } } - @Ignore("To be fixed before 1.0") - @Test(timeout = 60000) + @Test(timeout = 240000) public void testRegionReplicaReplicationWith2Replicas() throws Exception { testRegionReplicaReplication(2); } - @Ignore("To be fixed before 1.0") - @Test(timeout = 60000) + @Test(timeout = 240000) public void testRegionReplicaReplicationWith3Replicas() throws Exception { testRegionReplicaReplication(3); } - @Ignore("To be fixed before 1.0") - @Test(timeout = 60000) + @Test(timeout = 240000) public void testRegionReplicaReplicationWith10Replicas() throws Exception { testRegionReplicaReplication(10); } - @Ignore("To be fixed before 1.0") - @Test (timeout = 60000) + @Test (timeout = 240000) + public void testRegionReplicaWithoutMemstoreReplication() throws Exception { + int regionReplication = 3; + TableName tableName = TableName.valueOf("testRegionReplicaWithoutMemstoreReplication"); + HTableDescriptor htd = HTU.createTableDescriptor(tableName.toString()); + htd.setRegionReplication(regionReplication); + htd.setRegionMemstoreReplication(false); + HTU.getHBaseAdmin().createTable(htd); + + Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); + Table table = connection.getTable(tableName); + try { + // write data to the primary. The replicas should not receive the data + final int STEP = 100; + for (int i = 0; i < 3; ++i) { + final int startRow = i * STEP; + final int endRow = (i + 1) * STEP; + LOG.info("Writing data from " + startRow + " to " + endRow); + HTU.loadNumericRows(table, HBaseTestingUtility.fam1, startRow, endRow); + verifyReplication(tableName, regionReplication, startRow, endRow, false); + + // Flush the table, now the data should show up in the replicas + LOG.info("flushing table"); + HTU.flush(tableName); + verifyReplication(tableName, regionReplication, 0, endRow, true); + } + } finally { + table.close(); + connection.close(); + } + } + + @Test (timeout = 240000) public void testRegionReplicaReplicationForFlushAndCompaction() throws Exception { // Tests a table with region replication 3. Writes some data, and causes flushes and // compactions. Verifies that the data is readable from the replicas. Note that this @@ -262,21 +319,19 @@ public class TestRegionReplicaReplicationEndpoint { HTU.compact(tableName, false); } - verifyReplication(tableName, regionReplication, 0, 6000); + verifyReplication(tableName, regionReplication, 0, 1000); } finally { table.close(); connection.close(); } } - @Ignore("To be fixed before 1.0") - @Test (timeout = 60000) + @Test (timeout = 240000) public void testRegionReplicaReplicationIgnoresDisabledTables() throws Exception { testRegionReplicaReplicationIgnoresDisabledTables(false); } - @Ignore("To be fixed before 1.0") - @Test (timeout = 60000) + @Test (timeout = 240000) public void testRegionReplicaReplicationIgnoresDroppedTables() throws Exception { testRegionReplicaReplicationIgnoresDisabledTables(true); } @@ -305,7 +360,7 @@ public class TestRegionReplicaReplicationEndpoint { // now that the replication is disabled, write to the table to be dropped, then drop the table. 
- HConnection connection = HConnectionManager.createConnection(HTU.getConfiguration()); + Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration()); Table table = connection.getTable(tableName); Table tableToBeDisabled = connection.getTable(toBeDisabledTable); @@ -318,9 +373,9 @@ public class TestRegionReplicaReplicationEndpoint { RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter sinkWriter = new RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter(sink, (ClusterConnection) connection, - Executors.newSingleThreadExecutor(), 1000); - - HRegionLocation hrl = connection.locateRegion(toBeDisabledTable, HConstants.EMPTY_BYTE_ARRAY); + Executors.newSingleThreadExecutor(), Integer.MAX_VALUE); + RegionLocator rl = connection.getRegionLocator(toBeDisabledTable); + HRegionLocation hrl = rl.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY); byte[] encodedRegionName = hrl.getRegionInfo().getEncodedNameAsBytes(); Entry entry = new Entry( @@ -346,11 +401,12 @@ public class TestRegionReplicaReplicationEndpoint { // now enable the replication admin.enablePeer(ServerRegionReplicaUtil.getReplicationPeerId()); - verifyReplication(tableName, regionReplication, 0, 6000); + verifyReplication(tableName, regionReplication, 0, 1000); } finally { admin.close(); table.close(); + rl.close(); tableToBeDisabled.close(); HTU.deleteTableIfAny(toBeDisabledTable); connection.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java index a191bdd79f2..930ffbac5a1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpointNoMaster.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.replication.regionserver; import static org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster.closeRegion; import static org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster.openRegion; +import static org.junit.Assert.*; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -48,15 +49,19 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryResponse; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.TestRegionServerNoMaster; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.replication.ReplicationEndpoint; import org.apache.hadoop.hbase.replication.ReplicationEndpoint.ReplicateContext; +import org.apache.hadoop.hbase.replication.ReplicationPeer; +import org.apache.hadoop.hbase.replication.WALEntryFilter; import org.apache.hadoop.hbase.replication.regionserver.RegionReplicaReplicationEndpoint.RegionReplicaReplayCallable; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; import org.junit.After; import 
org.junit.AfterClass; @@ -98,6 +103,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster { Configuration conf = HTU.getConfiguration(); conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true); conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true); + conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_WAIT_FOR_PRIMARY_FLUSH_CONF_KEY, false); // install WALObserver coprocessor for tests String walCoprocs = HTU.getConfiguration().get(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY); @@ -157,7 +163,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster { } } - @Test + @Test (timeout = 240000) public void testReplayCallable() throws Exception { // tests replaying the edits to a secondary region replica using the Callable directly openRegion(HTU, rs0, hriSecondary); @@ -171,7 +177,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster { // replay the edits to the secondary using replay callable replicateUsingCallable(connection, entries); - HRegion region = rs0.getFromOnlineRegions(hriSecondary.getEncodedName()); + Region region = rs0.getFromOnlineRegions(hriSecondary.getEncodedName()); HTU.verifyNumericRows(region, f, 0, 1000); HTU.deleteNumericRows(table, f, 0, 1000); @@ -197,7 +203,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster { } } - @Test + @Test (timeout = 240000) public void testReplayCallableWithRegionMove() throws Exception { // tests replaying the edits to a secondary region replica using the Callable directly while // the region is moved to another location.It tests handling of RME. @@ -211,7 +217,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster { // replay the edits to the secondary using replay callable replicateUsingCallable(connection, entries); - HRegion region = rs0.getFromOnlineRegions(hriSecondary.getEncodedName()); + Region region = rs0.getFromOnlineRegions(hriSecondary.getEncodedName()); HTU.verifyNumericRows(region, f, 0, 1000); HTU.loadNumericRows(table, f, 1000, 2000); // load some more data to primary @@ -232,7 +238,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster { connection.close(); } - @Test + @Test (timeout = 240000) public void testRegionReplicaReplicationEndpointReplicate() throws Exception { // tests replaying the edits to a secondary region replica using the RRRE.replicate() openRegion(HTU, rs0, hriSecondary); @@ -242,6 +248,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster { ReplicationEndpoint.Context context = mock(ReplicationEndpoint.Context.class); when(context.getConfiguration()).thenReturn(HTU.getConfiguration()); + when(context.getMetrics()).thenReturn(mock(MetricsSource.class)); replicator.init(context); replicator.start(); @@ -253,7 +260,7 @@ public class TestRegionReplicaReplicationEndpointNoMaster { // replay the edits to the secondary using replay callable replicator.replicate(new ReplicateContext().setEntries(Lists.newArrayList(entries))); - HRegion region = rs0.getFromOnlineRegions(hriSecondary.getEncodedName()); + Region region = rs0.getFromOnlineRegions(hriSecondary.getEncodedName()); HTU.verifyNumericRows(region, f, 0, 1000); HTU.deleteNumericRows(table, f, 0, 1000); @@ -261,4 +268,52 @@ public class TestRegionReplicaReplicationEndpointNoMaster { connection.close(); } + @Test (timeout = 240000) + public void testReplayedEditsAreSkipped() throws Exception { + openRegion(HTU, rs0, hriSecondary); + ClusterConnection connection = + (ClusterConnection) ConnectionFactory.createConnection(HTU.getConfiguration()); + 
RegionReplicaReplicationEndpoint replicator = new RegionReplicaReplicationEndpoint(); + + ReplicationEndpoint.Context context = mock(ReplicationEndpoint.Context.class); + when(context.getConfiguration()).thenReturn(HTU.getConfiguration()); + when(context.getMetrics()).thenReturn(mock(MetricsSource.class)); + + ReplicationPeer mockPeer = mock(ReplicationPeer.class); + when(mockPeer.getTableCFs()).thenReturn(null); + when(context.getReplicationPeer()).thenReturn(mockPeer); + + replicator.init(context); + replicator.start(); + + // test the filter for the RE, not actual replication + WALEntryFilter filter = replicator.getWALEntryfilter(); + + //load some data to primary + HTU.loadNumericRows(table, f, 0, 1000); + + Assert.assertEquals(1000, entries.size()); + for (Entry e: entries) { + if (Integer.parseInt(Bytes.toString(e.getEdit().getCells().get(0).getValue())) % 2 == 0) { + e.getKey().setOrigLogSeqNum(1); // simulate dist log replay by setting orig seq id + } + } + + long skipped = 0, replayed = 0; + for (Entry e : entries) { + if (filter.filter(e) == null) { + skipped++; + } else { + replayed++; + } + } + + assertEquals(500, skipped); + assertEquals(500, replayed); + + HTU.deleteNumericRows(table, f, 0, 1000); + closeRegion(HTU, rs0, hriSecondary); + connection.close(); + } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java index 5854f0cda0e..237efe95790 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java @@ -17,19 +17,20 @@ */ package org.apache.hadoop.hbase.security; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import com.google.common.base.Strings; @InterfaceAudience.Private -class HBaseKerberosUtils { +public class HBaseKerberosUtils { public static final String KRB_PRINCIPAL = "hbase.regionserver.kerberos.principal"; + public static final String MASTER_KRB_PRINCIPAL = "hbase.master.kerberos.principal"; public static final String KRB_KEYTAB_FILE = "hbase.regionserver.keytab.file"; - static boolean isKerberosPropertySetted() { + public static boolean isKerberosPropertySetted() { String krbPrincipal = System.getProperty(KRB_PRINCIPAL); String krbKeytab = System.getProperty(KRB_KEYTAB_FILE); if (Strings.isNullOrEmpty(krbPrincipal) || Strings.isNullOrEmpty(krbKeytab)) { @@ -38,41 +39,46 @@ class HBaseKerberosUtils { return true; } - static void setPrincipalForTesting(String principal) { + public static void setPrincipalForTesting(String principal) { setSystemProperty(KRB_PRINCIPAL, principal); } - static void setKeytabFileForTesting(String keytabFile) { + public static void setKeytabFileForTesting(String keytabFile) { setSystemProperty(KRB_KEYTAB_FILE, keytabFile); } - static void setSystemProperty(String propertyName, String propertyValue) { + public static void setSystemProperty(String propertyName, String propertyValue) { System.setProperty(propertyName, propertyValue); } - static String getKeytabFileForTesting() { + public static String getKeytabFileForTesting() { return System.getProperty(KRB_KEYTAB_FILE); } - static String getPrincipalForTesting() { + public static String 
getPrincipalForTesting() { return System.getProperty(KRB_PRINCIPAL); } - static Configuration getConfigurationWoPrincipal() { + public static Configuration getConfigurationWoPrincipal() { Configuration conf = HBaseConfiguration.create(); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); - conf.set("hbase.security.authentication", "kerberos"); - conf.setBoolean("hbase.security.authorization", true); + conf.set(User.HBASE_SECURITY_CONF_KEY, "kerberos"); + conf.setBoolean(User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY, true); return conf; } - static Configuration getSecuredConfiguration() { + public static Configuration getSecuredConfiguration() { Configuration conf = HBaseConfiguration.create(); - conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); - conf.set("hbase.security.authentication", "kerberos"); - conf.setBoolean("hbase.security.authorization", true); - conf.set(KRB_KEYTAB_FILE, System.getProperty(KRB_KEYTAB_FILE)); - conf.set(KRB_PRINCIPAL, System.getProperty(KRB_PRINCIPAL)); + setSecuredConfiguration(conf); return conf; } + + public static void setSecuredConfiguration(Configuration conf) { + conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos"); + conf.set(User.HBASE_SECURITY_CONF_KEY, "kerberos"); + conf.setBoolean(User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY, true); + conf.set(KRB_KEYTAB_FILE, System.getProperty(KRB_KEYTAB_FILE)); + conf.set(KRB_PRINCIPAL, System.getProperty(KRB_PRINCIPAL)); + conf.set(MASTER_KRB_PRINCIPAL, System.getProperty(KRB_PRINCIPAL)); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureRPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureRPC.java index b28a1ef8fce..8eff063e794 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureRPC.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureRPC.java @@ -21,32 +21,38 @@ package org.apache.hadoop.hbase.security; import static org.apache.hadoop.hbase.security.HBaseKerberosUtils.getKeytabFileForTesting; import static org.apache.hadoop.hbase.security.HBaseKerberosUtils.getPrincipalForTesting; import static org.apache.hadoop.hbase.security.HBaseKerberosUtils.getSecuredConfiguration; -import static org.apache.hadoop.hbase.security.HBaseKerberosUtils.isKerberosPropertySetted; - import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; -import static org.junit.Assume.assumeTrue; +import java.io.File; +import java.io.IOException; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.List; +import java.util.Properties; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.ipc.RpcClientFactory; -import org.apache.hadoop.hbase.testclassification.SecurityTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.ipc.AsyncRpcClient; import org.apache.hadoop.hbase.ipc.FifoRpcScheduler; import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcClientFactory; +import org.apache.hadoop.hbase.ipc.RpcClientImpl; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.ipc.TestDelayedRpc.TestDelayedImplementation; import 
org.apache.hadoop.hbase.ipc.TestDelayedRpc.TestThread; import org.apache.hadoop.hbase.ipc.protobuf.generated.TestDelayedRpcProtos; +import org.apache.hadoop.hbase.testclassification.SecurityTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.Mockito; @@ -55,19 +61,52 @@ import com.google.common.collect.Lists; import com.google.protobuf.BlockingRpcChannel; import com.google.protobuf.BlockingService; -@Category({SecurityTests.class, SmallTests.class}) +@Category({ SecurityTests.class, SmallTests.class }) public class TestSecureRPC { - public static RpcServerInterface rpcServer; - /** - * To run this test, we must specify the following system properties: - *
- * hbase.regionserver.kerberos.principal - *
      - * hbase.regionserver.keytab.file - */ + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + private static final File KEYTAB_FILE = new File(TEST_UTIL.getDataTestDir("keytab").toUri() + .getPath()); + + private static MiniKdc KDC; + + private static String HOST = "localhost"; + + private static String PRINCIPAL; + + @BeforeClass + public static void setUp() throws Exception { + Properties conf = MiniKdc.createConf(); + conf.put(MiniKdc.DEBUG, true); + KDC = new MiniKdc(conf, new File(TEST_UTIL.getDataTestDir("kdc").toUri().getPath())); + KDC.start(); + PRINCIPAL = "hbase/" + HOST; + KDC.createPrincipal(KEYTAB_FILE, PRINCIPAL); + HBaseKerberosUtils.setKeytabFileForTesting(KEYTAB_FILE.getAbsolutePath()); + HBaseKerberosUtils.setPrincipalForTesting(PRINCIPAL + "@" + KDC.getRealm()); + } + + @AfterClass + public static void tearDown() throws IOException { + if (KDC != null) { + KDC.stop(); + } + TEST_UTIL.cleanupTestDir(); + } + @Test - public void testRpcCallWithEnabledKerberosSaslAuth() throws Exception { - assumeTrue(isKerberosPropertySetted()); + public void testRpc() throws Exception { + testRpcCallWithEnabledKerberosSaslAuth(RpcClientImpl.class); + } + + @Test + public void testAsyncRpc() throws Exception { + testRpcCallWithEnabledKerberosSaslAuth(AsyncRpcClient.class); + } + + private void testRpcCallWithEnabledKerberosSaslAuth(Class rpcImplClass) + throws Exception { String krbKeytab = getKeytabFileForTesting(); String krbPrincipal = getPrincipalForTesting(); @@ -84,40 +123,42 @@ public class TestSecureRPC { assertEquals(krbPrincipal, ugi.getUserName()); Configuration conf = getSecuredConfiguration(); - + conf.set(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY, rpcImplClass.getName()); SecurityInfo securityInfoMock = Mockito.mock(SecurityInfo.class); Mockito.when(securityInfoMock.getServerPrincipal()) - .thenReturn(HBaseKerberosUtils.KRB_PRINCIPAL); + .thenReturn(HBaseKerberosUtils.KRB_PRINCIPAL); SecurityInfo.addInfo("TestDelayedService", securityInfoMock); boolean delayReturnValue = false; - InetSocketAddress isa = new InetSocketAddress("localhost", 0); + InetSocketAddress isa = new InetSocketAddress(HOST, 0); TestDelayedImplementation instance = new TestDelayedImplementation(delayReturnValue); BlockingService service = TestDelayedRpcProtos.TestDelayedService.newReflectiveBlockingService(instance); - rpcServer = new RpcServer(null, "testSecuredDelayedRpc", - Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(service, null)), - isa, conf, new FifoRpcScheduler(conf, 1)); + RpcServerInterface rpcServer = + new RpcServer(null, "testSecuredDelayedRpc", + Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(service, null)), isa, + conf, new FifoRpcScheduler(conf, 1)); rpcServer.start(); - RpcClient rpcClient = RpcClientFactory - .createClient(conf, HConstants.DEFAULT_CLUSTER_ID.toString()); + RpcClient rpcClient = + RpcClientFactory.createClient(conf, HConstants.DEFAULT_CLUSTER_ID.toString()); try { - BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel( - ServerName.valueOf(rpcServer.getListenerAddress().getHostName(), - rpcServer.getListenerAddress().getPort(), System.currentTimeMillis()), - User.getCurrent(), 1000); + BlockingRpcChannel channel = + rpcClient.createBlockingRpcChannel( + ServerName.valueOf(rpcServer.getListenerAddress().getHostName(), rpcServer + .getListenerAddress().getPort(), System.currentTimeMillis()), User.getCurrent(), + 5000); TestDelayedRpcProtos.TestDelayedService.BlockingInterface stub = - 
TestDelayedRpcProtos.TestDelayedService.newBlockingStub(channel); + TestDelayedRpcProtos.TestDelayedService.newBlockingStub(channel); List results = new ArrayList(); TestThread th1 = new TestThread(stub, true, results); th1.start(); - Thread.sleep(100); th1.join(); assertEquals(0xDEADBEEF, results.get(0).intValue()); } finally { rpcClient.close(); + rpcServer.stop(); } } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUsersOperationsWithSecureHadoop.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUsersOperationsWithSecureHadoop.java index a66c1249bb4..0226d49e5be 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUsersOperationsWithSecureHadoop.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestUsersOperationsWithSecureHadoop.java @@ -22,46 +22,76 @@ import static org.apache.hadoop.hbase.security.HBaseKerberosUtils.getConfigurati import static org.apache.hadoop.hbase.security.HBaseKerberosUtils.getKeytabFileForTesting; import static org.apache.hadoop.hbase.security.HBaseKerberosUtils.getPrincipalForTesting; import static org.apache.hadoop.hbase.security.HBaseKerberosUtils.getSecuredConfiguration; -import static org.apache.hadoop.hbase.security.HBaseKerberosUtils.isKerberosPropertySetted; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -import static org.junit.Assume.assumeTrue; +import java.io.File; import java.io.IOException; +import java.util.Properties; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.testclassification.SecurityTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.security.UserGroupInformation; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SecurityTests.class, SmallTests.class}) +@Category({ SecurityTests.class, SmallTests.class }) public class TestUsersOperationsWithSecureHadoop { + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final File KEYTAB_FILE = new File(TEST_UTIL.getDataTestDir("keytab").toUri() + .getPath()); + + private static MiniKdc KDC; + + private static String HOST = "localhost"; + + private static String PRINCIPAL; + + @BeforeClass + public static void setUp() throws Exception { + Properties conf = MiniKdc.createConf(); + conf.put(MiniKdc.DEBUG, true); + KDC = new MiniKdc(conf, new File(TEST_UTIL.getDataTestDir("kdc").toUri().getPath())); + KDC.start(); + PRINCIPAL = "hbase/" + HOST; + KDC.createPrincipal(KEYTAB_FILE, PRINCIPAL); + HBaseKerberosUtils.setKeytabFileForTesting(KEYTAB_FILE.getAbsolutePath()); + HBaseKerberosUtils.setPrincipalForTesting(PRINCIPAL + "@" + KDC.getRealm()); + } + + @AfterClass + public static void tearDown() throws IOException { + if (KDC != null) { + KDC.stop(); + } + TEST_UTIL.cleanupTestDir(); + } + /** - * test login with security enabled configuration - * - * To run this test, we must specify the following system properties: + * test login with security enabled configuration To run this test, we must specify the following + * system properties: *
* hbase.regionserver.kerberos.principal *
      * hbase.regionserver.keytab.file - * * @throws IOException */ @Test public void testUserLoginInSecureHadoop() throws Exception { UserGroupInformation defaultLogin = UserGroupInformation.getLoginUser(); Configuration conf = getConfigurationWoPrincipal(); - User.login(conf, HBaseKerberosUtils.KRB_KEYTAB_FILE, - HBaseKerberosUtils.KRB_PRINCIPAL, "localhost"); + User.login(conf, HBaseKerberosUtils.KRB_KEYTAB_FILE, HBaseKerberosUtils.KRB_PRINCIPAL, + "localhost"); UserGroupInformation failLogin = UserGroupInformation.getLoginUser(); - assertTrue("ugi should be the same in case fail login", - defaultLogin.equals(failLogin)); - - assumeTrue(isKerberosPropertySetted()); + assertTrue("ugi should be the same in case fail login", defaultLogin.equals(failLogin)); String nnKeyTab = getKeytabFileForTesting(); String dnPrincipal = getPrincipalForTesting(); @@ -72,10 +102,10 @@ public class TestUsersOperationsWithSecureHadoop { conf = getSecuredConfiguration(); UserGroupInformation.setConfiguration(conf); - User.login(conf, HBaseKerberosUtils.KRB_KEYTAB_FILE, - HBaseKerberosUtils.KRB_PRINCIPAL, "localhost"); + User.login(conf, HBaseKerberosUtils.KRB_KEYTAB_FILE, HBaseKerberosUtils.KRB_PRINCIPAL, + "localhost"); UserGroupInformation successLogin = UserGroupInformation.getLoginUser(); assertFalse("ugi should be different in in case success login", - defaultLogin.equals(successLogin)); + defaultLogin.equals(successLogin)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java index ea1baeb2564..fb06c05457a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/SecureTestUtil.java @@ -28,6 +28,7 @@ import java.security.PrivilegedExceptionAction; import java.util.List; import java.util.Map; import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -35,21 +36,32 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Coprocessor; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.Waiter.Predicate; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService; import 
org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.CheckPermissionsRequest; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.access.Permission.Action; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import com.google.common.collect.Lists; @@ -61,17 +73,11 @@ import com.google.protobuf.ServiceException; * Utility methods for testing security */ public class SecureTestUtil { - + private static final Log LOG = LogFactory.getLog(SecureTestUtil.class); private static final int WAIT_TIME = 10000; - public static void enableSecurity(Configuration conf) throws IOException { - conf.set("hadoop.security.authorization", "false"); - conf.set("hadoop.security.authentication", "simple"); - conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName()); - conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName() + - "," + SecureBulkLoadEndpoint.class.getName()); - conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, AccessController.class.getName()); + public static void configureSuperuser(Configuration conf) throws IOException { // The secure minicluster creates separate service principals based on the // current user's name, one for each slave. We need to add all of these to // the superuser list or security won't function properly. We expect the @@ -86,8 +92,19 @@ public class SecureTestUtil { sb.append(currentUser); sb.append(".hfs."); sb.append(i); } conf.set("hbase.superuser", sb.toString()); + } + + public static void enableSecurity(Configuration conf) throws IOException { + conf.set("hadoop.security.authorization", "false"); + conf.set("hadoop.security.authentication", "simple"); + conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName() + + "," + MasterSyncObserver.class.getName()); + conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName() + + "," + SecureBulkLoadEndpoint.class.getName()); + conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, AccessController.class.getName()); // Need HFile V3 for tags for security features conf.setInt(HFile.FORMAT_VERSION_KEY, 3); + configureSuperuser(conf); } public static void verifyConfiguration(Configuration conf) { @@ -145,6 +162,7 @@ public class SecureTestUtil { */ static interface AccessTestAction extends PrivilegedExceptionAction { } + /** This fails only in case of ADE or empty list for any of the actions. */ public static void verifyAllowed(User user, AccessTestAction... actions) throws Exception { for (AccessTestAction action : actions) { try { @@ -161,6 +179,7 @@ public class SecureTestUtil { } } + /** This fails only in case of ADE or empty list for any of the users. */ public static void verifyAllowed(AccessTestAction action, User... users) throws Exception { for (User user : users) { verifyAllowed(user, action); @@ -182,36 +201,53 @@ public class SecureTestUtil { } } - public static void verifyDeniedWithException(User user, AccessTestAction... actions) - throws Exception { - verifyDenied(user, true, actions); - } - - public static void verifyDeniedWithException(AccessTestAction action, User... users) - throws Exception { + /** This passes only in case of ADE for all users. */ + public static void verifyDenied(AccessTestAction action, User... 
users) throws Exception { for (User user : users) { - verifyDenied(user, true, action); + verifyDenied(user, action); } } - public static void verifyDenied(User user, AccessTestAction... actions) throws Exception { - verifyDenied(user, false, actions); - } - - public static void verifyDenied(User user, boolean requireException, - AccessTestAction... actions) throws Exception { - for (AccessTestAction action : actions) { + /** This passes only in case of empty list for all users. */ + public static void verifyIfEmptyList(AccessTestAction action, User... users) throws Exception { + for (User user : users) { try { Object obj = user.runAs(action); - if (requireException) { - fail("Expected exception was not thrown for user '" + user.getShortName() + "'"); - } if (obj != null && obj instanceof List) { List results = (List) obj; if (results != null && !results.isEmpty()) { - fail("Unexpected results for user '" + user.getShortName() + "'"); + fail("Unexpected action results: " + results + " for user '" + + user.getShortName() + "'"); } + } else { + fail("Unexpected results for user '" + user.getShortName() + "'"); } + } catch (AccessDeniedException ade) { + fail("Expected action to pass for user '" + user.getShortName() + "' but was denied"); + } + } + } + + /** This passes only in case of null for all users. */ + public static void verifyIfNull(AccessTestAction action, User... users) throws Exception { + for (User user : users) { + try { + Object obj = user.runAs(action); + if (obj != null) { + fail("Non null results from action for user '" + user.getShortName() + "'"); + } + } catch (AccessDeniedException ade) { + fail("Expected action to pass for user '" + user.getShortName() + "' but was denied"); + } + } + } + + /** This passes only in case of ADE for all actions. */ + public static void verifyDenied(User user, AccessTestAction... actions) throws Exception { + for (AccessTestAction action : actions) { + try { + user.runAs(action); + fail("Expected exception was not thrown for user '" + user.getShortName() + "'"); } catch (IOException e) { boolean isAccessDeniedException = false; if(e instanceof RetriesExhaustedWithDetailsException) { @@ -257,16 +293,10 @@ public class SecureTestUtil { } } - public static void verifyDenied(AccessTestAction action, User... 
users) throws Exception { - for (User user : users) { - verifyDenied(user, action); - } - } - private static List getAccessControllers(MiniHBaseCluster cluster) { List result = Lists.newArrayList(); for (RegionServerThread t: cluster.getLiveRegionServerThreads()) { - for (HRegion region: t.getRegionServer().getOnlineRegionsLocalContext()) { + for (Region region: t.getRegionServer().getOnlineRegionsLocalContext()) { Coprocessor cp = region.getCoprocessorHost() .findCoprocessor(AccessController.class.getName()); if (cp != null) { @@ -301,7 +331,7 @@ public class SecureTestUtil { for (Map.Entry e: mtimes.entrySet()) { if (!oldMTimes.containsKey(e.getKey())) { LOG.error("Snapshot of AccessController state does not include instance on region " + - e.getKey().getRegion().getRegionNameAsString()); + e.getKey().getRegion().getRegionInfo().getRegionNameAsString()); // Error out the predicate, we will try again return false; } @@ -309,8 +339,8 @@ public class SecureTestUtil { long now = e.getValue(); if (now <= old) { LOG.info("AccessController on region " + - e.getKey().getRegion().getRegionNameAsString() + " has not updated: mtime=" + - now); + e.getKey().getRegion().getRegionInfo().getRegionNameAsString() + + " has not updated: mtime=" + now); return false; } } @@ -394,13 +424,13 @@ public class SecureTestUtil { * or will throw an exception upon timeout (10 seconds). */ public static void grantOnNamespaceUsingAccessControlClient(final HBaseTestingUtility util, - final Configuration conf, final String user, final String namespace, + final Connection connection, final String user, final String namespace, final Permission.Action... actions) throws Exception { SecureTestUtil.updateACLs(util, new Callable() { @Override public Void call() throws Exception { try { - AccessControlClient.grant(conf, namespace, user, actions); + AccessControlClient.grant(connection, namespace, user, actions); } catch (Throwable t) { t.printStackTrace(); } @@ -415,13 +445,13 @@ public class SecureTestUtil { * or will throw an exception upon timeout (10 seconds). */ public static void revokeFromNamespaceUsingAccessControlClient(final HBaseTestingUtility util, - final Configuration conf, final String user, final String namespace, + final Connection connection, final String user, final String namespace, final Permission.Action... actions) throws Exception { SecureTestUtil.updateACLs(util, new Callable() { @Override public Void call() throws Exception { try { - AccessControlClient.revoke(conf, namespace, user, actions); + AccessControlClient.revoke(connection, namespace, user, actions); } catch (Throwable t) { t.printStackTrace(); } @@ -483,13 +513,13 @@ public class SecureTestUtil { * throw an exception upon timeout (10 seconds). */ public static void grantOnTableUsingAccessControlClient(final HBaseTestingUtility util, - final Configuration conf, final String user, final TableName table, final byte[] family, + final Connection connection, final String user, final TableName table, final byte[] family, final byte[] qualifier, final Permission.Action... actions) throws Exception { SecureTestUtil.updateACLs(util, new Callable() { @Override public Void call() throws Exception { try { - AccessControlClient.grant(conf, table, user, family, qualifier, actions); + AccessControlClient.grant(connection, table, user, family, qualifier, actions); } catch (Throwable t) { t.printStackTrace(); } @@ -504,13 +534,13 @@ public class SecureTestUtil { * throw an exception upon timeout (10 seconds). 
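* (Illustrative usage sketch, not part of the patch; "util" and "someUser" are hypothetical:
*  grantGlobalUsingAccessControlClient(util, util.getConnection(), "someUser", Permission.Action.READ);)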
*/ public static void grantGlobalUsingAccessControlClient(final HBaseTestingUtility util, - final Configuration conf, final String user, final Permission.Action... actions) + final Connection connection, final String user, final Permission.Action... actions) throws Exception { SecureTestUtil.updateACLs(util, new Callable() { @Override public Void call() throws Exception { try { - AccessControlClient.grant(conf, user, actions); + AccessControlClient.grant(connection, user, actions); } catch (Throwable t) { t.printStackTrace(); } @@ -549,13 +579,13 @@ public class SecureTestUtil { * throw an exception upon timeout (10 seconds). */ public static void revokeFromTableUsingAccessControlClient(final HBaseTestingUtility util, - final Configuration conf, final String user, final TableName table, final byte[] family, + final Connection connection, final String user, final TableName table, final byte[] family, final byte[] qualifier, final Permission.Action... actions) throws Exception { SecureTestUtil.updateACLs(util, new Callable() { @Override public Void call() throws Exception { try { - AccessControlClient.revoke(conf, table, user, family, qualifier, actions); + AccessControlClient.revoke(connection, table, user, family, qualifier, actions); } catch (Throwable t) { t.printStackTrace(); } @@ -570,13 +600,13 @@ public class SecureTestUtil { * throw an exception upon timeout (10 seconds). */ public static void revokeGlobalUsingAccessControlClient(final HBaseTestingUtility util, - final Configuration conf, final String user,final Permission.Action... actions) + final Connection connection, final String user,final Permission.Action... actions) throws Exception { SecureTestUtil.updateACLs(util, new Callable() { @Override public Void call() throws Exception { try { - AccessControlClient.revoke(conf, user, actions); + AccessControlClient.revoke(connection, user, actions); } catch (Throwable t) { t.printStackTrace(); } @@ -584,4 +614,168 @@ public class SecureTestUtil { } }); } + + public static class MasterSyncObserver extends BaseMasterObserver { + volatile CountDownLatch tableCreationLatch = null; + volatile CountDownLatch tableDeletionLatch = null; + + @Override + public void postCreateTableHandler( + final ObserverContext ctx, + HTableDescriptor desc, HRegionInfo[] regions) throws IOException { + // the AccessController test, some times calls only and directly the postCreateTableHandler() + if (tableCreationLatch != null) { + tableCreationLatch.countDown(); + } + } + + @Override + public void postDeleteTableHandler( + final ObserverContext ctx, TableName tableName) + throws IOException { + // the AccessController test, some times calls only and directly the postDeleteTableHandler() + if (tableDeletionLatch != null) { + tableDeletionLatch.countDown(); + } + } + } + + public static Table createTable(HBaseTestingUtility testUtil, TableName tableName, + byte[][] families) throws Exception { + HTableDescriptor htd = new HTableDescriptor(tableName); + for (byte[] family : families) { + HColumnDescriptor hcd = new HColumnDescriptor(family); + htd.addFamily(hcd); + } + createTable(testUtil, testUtil.getHBaseAdmin(), htd); + return testUtil.getConnection().getTable(htd.getTableName()); + } + + public static void createTable(HBaseTestingUtility testUtil, HTableDescriptor htd) + throws Exception { + createTable(testUtil, testUtil.getHBaseAdmin(), htd); + } + + public static void createTable(HBaseTestingUtility testUtil, HTableDescriptor htd, + byte[][] splitKeys) throws Exception { + createTable(testUtil, 
testUtil.getHBaseAdmin(), htd, splitKeys); + } + + public static void createTable(HBaseTestingUtility testUtil, Admin admin, HTableDescriptor htd) + throws Exception { + createTable(testUtil, admin, htd, null); + } + + public static void createTable(HBaseTestingUtility testUtil, Admin admin, HTableDescriptor htd, + byte[][] splitKeys) throws Exception { + // NOTE: We need a latch because admin is not sync, + // so the postOp coprocessor method may be called after the admin operation returned. + MasterSyncObserver observer = (MasterSyncObserver)testUtil.getHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName()); + observer.tableCreationLatch = new CountDownLatch(1); + if (splitKeys != null) { + admin.createTable(htd, splitKeys); + } else { + admin.createTable(htd); + } + observer.tableCreationLatch.await(); + observer.tableCreationLatch = null; + testUtil.waitUntilAllRegionsAssigned(htd.getTableName()); + } + + public static void deleteTable(HBaseTestingUtility testUtil, TableName tableName) + throws Exception { + deleteTable(testUtil, testUtil.getHBaseAdmin(), tableName); + } + + public static void createNamespace(HBaseTestingUtility testUtil, NamespaceDescriptor nsDesc) + throws Exception { + testUtil.getHBaseAdmin().createNamespace(nsDesc); + } + + public static void deleteNamespace(HBaseTestingUtility testUtil, String namespace) + throws Exception { + testUtil.getHBaseAdmin().deleteNamespace(namespace); + } + + public static void deleteTable(HBaseTestingUtility testUtil, Admin admin, TableName tableName) + throws Exception { + // NOTE: We need a latch because admin is not sync, + // so the postOp coprocessor method may be called after the admin operation returned. + MasterSyncObserver observer = (MasterSyncObserver)testUtil.getHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName()); + observer.tableDeletionLatch = new CountDownLatch(1); + try { + admin.disableTable(tableName); + } catch (TableNotEnabledException e) { + LOG.debug("Table: " + tableName + " already disabled, so just deleting it."); + } + admin.deleteTable(tableName); + observer.tableDeletionLatch.await(); + observer.tableDeletionLatch = null; + } + + public static String convertToNamespace(String namespace) { + return AccessControlLists.NAMESPACE_PREFIX + namespace; + } + + public static String convertToGroup(String group) { + return AccessControlLists.GROUP_PREFIX + group; + } + + public static void checkGlobalPerms(HBaseTestingUtility testUtil, Permission.Action... 
actions) + throws IOException { + Permission[] perms = new Permission[actions.length]; + for (int i = 0; i < actions.length; i++) { + perms[i] = new Permission(actions[i]); + } + CheckPermissionsRequest.Builder request = CheckPermissionsRequest.newBuilder(); + for (Action a : actions) { + request.addPermission(AccessControlProtos.Permission.newBuilder() + .setType(AccessControlProtos.Permission.Type.Global) + .setGlobalPermission( + AccessControlProtos.GlobalPermission.newBuilder() + .addAction(ProtobufUtil.toPermissionAction(a)).build())); + } + try(Connection conn = ConnectionFactory.createConnection(testUtil.getConfiguration()); + Table acl = conn.getTable(AccessControlLists.ACL_TABLE_NAME)) { + BlockingRpcChannel channel = acl.coprocessorService(new byte[0]); + AccessControlService.BlockingInterface protocol = + AccessControlService.newBlockingStub(channel); + try { + protocol.checkPermissions(null, request.build()); + } catch (ServiceException se) { + ProtobufUtil.toIOException(se); + } + } + } + + public static void checkTablePerms(HBaseTestingUtility testUtil, TableName table, byte[] family, + byte[] column, Permission.Action... actions) throws IOException { + Permission[] perms = new Permission[actions.length]; + for (int i = 0; i < actions.length; i++) { + perms[i] = new TablePermission(table, family, column, actions[i]); + } + checkTablePerms(testUtil, table, perms); + } + + public static void checkTablePerms(HBaseTestingUtility testUtil, TableName table, + Permission... perms) throws IOException { + CheckPermissionsRequest.Builder request = CheckPermissionsRequest.newBuilder(); + for (Permission p : perms) { + request.addPermission(ProtobufUtil.toPermission(p)); + } + + try(Connection conn = ConnectionFactory.createConnection(testUtil.getConfiguration()); + Table acl = conn.getTable(table)) { + AccessControlService.BlockingInterface protocol = + AccessControlService.newBlockingStub(acl.coprocessorService(new byte[0])); + try { + protocol.checkPermissions(null, request.build()); + } catch (ServiceException se) { + ProtobufUtil.toIOException(se); + } + } + } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java index d6a6f03f47c..5d58110f404 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessControlFilter.java @@ -94,7 +94,7 @@ public class TestAccessControlFilter extends SecureTestUtil { @Test public void testQualifierAccess() throws Exception { - final Table table = TEST_UTIL.createTable(TABLE, FAMILY); + final Table table = createTable(TEST_UTIL, TABLE, new byte[][] { FAMILY }); try { doQualifierAccess(table); } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index 51436b42400..ff0a720a405 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.security.access; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; -import static 
org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -97,6 +96,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.CheckPermi import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost; import org.apache.hadoop.hbase.regionserver.ScanType; @@ -138,9 +138,15 @@ public class TestAccessController extends SecureTestUtil { } @Rule public TestTableName TEST_TABLE = new TestTableName(); - private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static Configuration conf; + /** The systemUserConnection created here is tied to the system user. In case, you are planning + * to create AccessTestAction, DON'T use this systemUserConnection as the 'doAs' user + * gets eclipsed by the system user. */ + private static Connection systemUserConnection; + + // user with all permissions private static User SUPERUSER; // user granted with all global permission @@ -176,11 +182,6 @@ public class TestAccessController extends SecureTestUtil { public static void setupBeforeClass() throws Exception { // setup configuration conf = TEST_UTIL.getConfiguration(); - conf.set("hbase.master.hfilecleaner.plugins", - "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner," + - "org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner"); - conf.set("hbase.master.logcleaner.plugins", - "org.apache.hadoop.hbase.master.snapshot.SnapshotLogCleaner"); // Enable security enableSecurity(conf); // In this particular test case, we can't use SecureBulkLoadEndpoint because its doAs will fail @@ -216,6 +217,8 @@ public class TestAccessController extends SecureTestUtil { USER_CREATE = User.createUserForTesting(conf, "tbl_create", new String[0]); USER_NONE = User.createUserForTesting(conf, "nouser", new String[0]); USER_ADMIN_CF = User.createUserForTesting(conf, "col_family_admin", new String[0]); + + systemUserConnection = TEST_UTIL.getConnection(); } @AfterClass @@ -232,10 +235,9 @@ public class TestAccessController extends SecureTestUtil { hcd.setMaxVersions(100); htd.addFamily(hcd); htd.setOwner(USER_OWNER); - admin.createTable(htd, new byte[][] { Bytes.toBytes("s") }); - TEST_UTIL.waitTableEnabled(TEST_TABLE.getTableName()); + createTable(TEST_UTIL, htd, new byte[][] { Bytes.toBytes("s") }); - HRegion region = TEST_UTIL.getHBaseCluster().getRegions(TEST_TABLE.getTableName()).get(0); + Region region = TEST_UTIL.getHBaseCluster().getRegions(TEST_TABLE.getTableName()).get(0); RegionCoprocessorHost rcpHost = region.getCoprocessorHost(); RCP_ENV = rcpHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER, Coprocessor.PRIORITY_HIGHEST, 1, conf); @@ -270,7 +272,8 @@ public class TestAccessController extends SecureTestUtil { assertEquals(5, AccessControlLists.getTablePermissions(conf, TEST_TABLE.getTableName()).size()); try { - assertEquals(5, AccessControlClient.getUserPermissions(conf, TEST_TABLE.toString()).size()); + assertEquals(5, AccessControlClient.getUserPermissions(systemUserConnection, + TEST_TABLE.toString()).size()); } catch (Throwable e) { LOG.error("error during call of 
AccessControlClient.getUserPermissions. ", e); } @@ -280,14 +283,17 @@ public class TestAccessController extends SecureTestUtil { public void tearDown() throws Exception { // Clean the _acl_ table try { - TEST_UTIL.deleteTable(TEST_TABLE.getTableName()); + deleteTable(TEST_UTIL, TEST_TABLE.getTableName()); } catch (TableNotFoundException ex) { // Test deleted the table, no problem LOG.info("Test deleted table " + TEST_TABLE.getTableName()); } // Verify all table/namespace permissions are erased assertEquals(0, AccessControlLists.getTablePermissions(conf, TEST_TABLE.getTableName()).size()); - assertEquals(0, AccessControlLists.getNamespacePermissions(conf, TEST_TABLE.getTableName().getNameAsString()).size()); + assertEquals( + 0, + AccessControlLists.getNamespacePermissions(conf, + TEST_TABLE.getTableName().getNamespaceAsString()).size()); } @Test @@ -354,8 +360,8 @@ public class TestAccessController extends SecureTestUtil { } }; - verifyAllowed(truncateTable, SUPERUSER, USER_ADMIN, USER_CREATE); - verifyDenied(truncateTable, USER_RW, USER_RO, USER_NONE, USER_OWNER); + verifyAllowed(truncateTable, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER); + verifyDenied(truncateTable, USER_RW, USER_RO, USER_NONE); } @Test @@ -451,8 +457,7 @@ public class TestAccessController extends SecureTestUtil { @Test public void testMove() throws Exception { List regions; - try (RegionLocator locator = - TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE.getTableName())) { + try (RegionLocator locator = systemUserConnection.getRegionLocator(TEST_TABLE.getTableName())) { regions = locator.getAllRegionLocations(); } HRegionLocation location = regions.get(0); @@ -474,8 +479,7 @@ public class TestAccessController extends SecureTestUtil { @Test public void testAssign() throws Exception { List regions; - try (RegionLocator locator = - TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE.getTableName())) { + try (RegionLocator locator = systemUserConnection.getRegionLocator(TEST_TABLE.getTableName())) { regions = locator.getAllRegionLocations(); } HRegionLocation location = regions.get(0); @@ -495,8 +499,7 @@ public class TestAccessController extends SecureTestUtil { @Test public void testUnassign() throws Exception { List regions; - try (RegionLocator locator = - TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE.getTableName())) { + try (RegionLocator locator = systemUserConnection.getRegionLocator(TEST_TABLE.getTableName())) { regions = locator.getAllRegionLocations(); } HRegionLocation location = regions.get(0); @@ -516,8 +519,7 @@ public class TestAccessController extends SecureTestUtil { @Test public void testRegionOffline() throws Exception { List regions; - try (RegionLocator locator = - TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE.getTableName())) { + try (RegionLocator locator = systemUserConnection.getRegionLocator(TEST_TABLE.getTableName())) { regions = locator.getAllRegionLocations(); } HRegionLocation location = regions.get(0); @@ -627,8 +629,8 @@ public class TestAccessController extends SecureTestUtil { @Test public void testMergeRegions() throws Exception { - final List regions = TEST_UTIL.getHBaseCluster().findRegionsForTable(TEST_TABLE.getTableName()); + assertTrue("not enough regions: " + regions.size(), regions.size() >= 2); AccessTestAction action = new AccessTestAction() { @Override @@ -673,20 +675,6 @@ public class TestAccessController extends SecureTestUtil { verifyDenied(action, USER_RW, USER_RO, USER_NONE); } - @Test - public void testPreCompactSelection() throws Exception { - 
AccessTestAction action = new AccessTestAction() { - @Override - public Object run() throws Exception { - ACCESS_CONTROLLER.preCompactSelection(ObserverContext.createAndPrepare(RCP_ENV, null), null, null); - return null; - } - }; - - verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER); - verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE); - } - private void verifyRead(AccessTestAction action) throws Exception { verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, USER_RW, USER_RO); verifyDenied(action, USER_NONE); @@ -705,11 +693,9 @@ public class TestAccessController extends SecureTestUtil { public Object run() throws Exception { Get g = new Get(TEST_ROW); g.addFamily(TEST_FAMILY); - Table t = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(TEST_TABLE.getTableName())) { t.get(g); - } finally { - t.close(); } return null; } @@ -722,9 +708,8 @@ public class TestAccessController extends SecureTestUtil { public Object run() throws Exception { Scan s = new Scan(); s.addFamily(TEST_FAMILY); - - Table table = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table table = conn.getTable(TEST_TABLE.getTableName())) { ResultScanner scanner = table.getScanner(s); try { for (Result r = scanner.next(); r != null; r = scanner.next()) { @@ -734,8 +719,6 @@ public class TestAccessController extends SecureTestUtil { } finally { scanner.close(); } - } finally { - table.close(); } return null; } @@ -752,11 +735,9 @@ public class TestAccessController extends SecureTestUtil { public Object run() throws Exception { Put p = new Put(TEST_ROW); p.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(1)); - Table t = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(TEST_TABLE.getTableName())) { t.put(p); - } finally { - t.close(); } return null; } @@ -769,11 +750,9 @@ public class TestAccessController extends SecureTestUtil { public Object run() throws Exception { Delete d = new Delete(TEST_ROW); d.deleteFamily(TEST_FAMILY); - Table t = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(TEST_TABLE.getTableName())) { t.delete(d); - } finally { - t.close(); } return null; } @@ -786,11 +765,9 @@ public class TestAccessController extends SecureTestUtil { public Object run() throws Exception { Increment inc = new Increment(TEST_ROW); inc.addColumn(TEST_FAMILY, TEST_QUALIFIER, 1); - Table t = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(TEST_TABLE.getTableName());) { t.increment(inc); - } finally { - t.close(); } return null; } @@ -806,12 +783,10 @@ public class TestAccessController extends SecureTestUtil { public Object run() throws Exception { Delete d = new Delete(TEST_ROW); d.deleteFamily(TEST_FAMILY); - Table t = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(TEST_TABLE.getTableName());) { t.checkAndDelete(TEST_ROW, TEST_FAMILY, TEST_QUALIFIER, - Bytes.toBytes("test_value"), d); - } finally { - t.close(); + Bytes.toBytes("test_value"), 
d); } return null; } @@ -824,12 +799,10 @@ public class TestAccessController extends SecureTestUtil { public Object run() throws Exception { Put p = new Put(TEST_ROW); p.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(1)); - Table t = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(TEST_TABLE.getTableName());) { t.checkAndPut(TEST_ROW, TEST_FAMILY, TEST_QUALIFIER, - Bytes.toBytes("test_value"), p); - } finally { - t.close(); + Bytes.toBytes("test_value"), p); } return null; } @@ -926,8 +899,10 @@ public class TestAccessController extends SecureTestUtil { //set global read so RegionServer can move it setPermission(loadPath, FsPermission.valueOf("-rwxrwxrwx")); - try (HTable table = (HTable)TEST_UTIL.getConnection().getTable(tableName)) { - TEST_UTIL.waitTableEnabled(tableName); + + try (Connection conn = ConnectionFactory.createConnection(conf); + HTable table = (HTable)conn.getTable(tableName)) { + TEST_UTIL.waitUntilAllRegionsAssigned(tableName); LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf); loader.doBulkLoad(loadPath, table); } @@ -958,12 +933,10 @@ public class TestAccessController extends SecureTestUtil { put.add(TEST_FAMILY, qualifier, Bytes.toBytes(1)); Append append = new Append(row); append.add(TEST_FAMILY, qualifier, Bytes.toBytes(2)); - Table t = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(TEST_TABLE.getTableName())) { t.put(put); t.append(append); - } finally { - t.close(); } return null; } @@ -978,17 +951,13 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction grantAction = new AccessTestAction() { @Override public Object run() throws Exception { - Connection connection = ConnectionFactory.createConnection(conf); - Table acl = connection.getTable(AccessControlLists.ACL_TABLE_NAME); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table acl = conn.getTable(AccessControlLists.ACL_TABLE_NAME)) { BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getTableName().getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.grant(protocol, USER_RO.getShortName(), TEST_TABLE.getTableName(), - TEST_FAMILY, null, Action.READ); - } finally { - acl.close(); - connection.close(); + TEST_FAMILY, null, Action.READ); } return null; } @@ -997,17 +966,13 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction revokeAction = new AccessTestAction() { @Override public Object run() throws Exception { - Connection connection = ConnectionFactory.createConnection(conf); - Table acl = connection.getTable(AccessControlLists.ACL_TABLE_NAME); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table acl = conn.getTable(AccessControlLists.ACL_TABLE_NAME)) { BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getTableName().getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.revoke(protocol, USER_RO.getShortName(), TEST_TABLE.getTableName(), - TEST_FAMILY, null, Action.READ); - } finally { - acl.close(); - connection.close(); + TEST_FAMILY, null, Action.READ); } return null; } @@ -1016,16 +981,12 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction getTablePermissionsAction = new 
AccessTestAction() { @Override public Object run() throws Exception { - Connection connection = ConnectionFactory.createConnection(conf); - Table acl = connection.getTable(AccessControlLists.ACL_TABLE_NAME); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table acl = conn.getTable(AccessControlLists.ACL_TABLE_NAME)){ BlockingRpcChannel service = acl.coprocessorService(TEST_TABLE.getTableName().getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.getUserPermissions(protocol, TEST_TABLE.getTableName()); - } finally { - acl.close(); - connection.close(); } return null; } @@ -1034,16 +995,12 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction getGlobalPermissionsAction = new AccessTestAction() { @Override public Object run() throws Exception { - Connection connection = ConnectionFactory.createConnection(conf); - Table acl = connection.getTable(AccessControlLists.ACL_TABLE_NAME); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table acl = conn.getTable(AccessControlLists.ACL_TABLE_NAME);) { BlockingRpcChannel service = acl.coprocessorService(HConstants.EMPTY_START_ROW); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); ProtobufUtil.getUserPermissions(protocol); - } finally { - acl.close(); - connection.close(); } return null; } @@ -1059,7 +1016,7 @@ public class TestAccessController extends SecureTestUtil { verifyDenied(getTablePermissionsAction, USER_CREATE, USER_RW, USER_RO, USER_NONE); verifyAllowed(getGlobalPermissionsAction, SUPERUSER, USER_ADMIN); - verifyDeniedWithException(getGlobalPermissionsAction, USER_CREATE, + verifyDenied(getGlobalPermissionsAction, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE); } @@ -1074,14 +1031,12 @@ public class TestAccessController extends SecureTestUtil { // create table Admin admin = TEST_UTIL.getHBaseAdmin(); if (admin.tableExists(tableName)) { - admin.disableTable(tableName); - admin.deleteTable(tableName); + deleteTable(TEST_UTIL, tableName); } HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(family1)); htd.addFamily(new HColumnDescriptor(family2)); - admin.createTable(htd); - TEST_UTIL.waitUntilAllRegionsAssigned(tableName); + createTable(TEST_UTIL, htd); // create temp users User tblUser = User @@ -1096,11 +1051,10 @@ public class TestAccessController extends SecureTestUtil { Put p = new Put(Bytes.toBytes("a")); p.add(family1, qualifier, Bytes.toBytes("v1")); p.add(family2, qualifier, Bytes.toBytes("v2")); - Table t = TEST_UTIL.getConnection().getTable(tableName); - try { + + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(tableName);) { t.put(p); - } finally { - t.close(); } return null; } @@ -1111,11 +1065,10 @@ public class TestAccessController extends SecureTestUtil { public Object run() throws Exception { Put p = new Put(Bytes.toBytes("a")); p.add(family1, qualifier, Bytes.toBytes("v1")); - Table t = TEST_UTIL.getConnection().getTable(tableName); - try { + + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(tableName)) { t.put(p); - } finally { - t.close(); } return null; } @@ -1126,11 +1079,9 @@ public class TestAccessController extends SecureTestUtil { public Object run() throws Exception { Put p = new Put(Bytes.toBytes("a")); p.add(family2, qualifier, Bytes.toBytes("v2")); - Table t = 
TEST_UTIL.getConnection().getTable(tableName); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(tableName);) { t.put(p); - } finally { - t.close(); } return null; } @@ -1142,11 +1093,9 @@ public class TestAccessController extends SecureTestUtil { Get g = new Get(TEST_ROW); g.addFamily(family1); g.addFamily(family2); - Table t = TEST_UTIL.getConnection().getTable(tableName); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(tableName);) { t.get(g); - } finally { - t.close(); } return null; } @@ -1157,11 +1106,9 @@ public class TestAccessController extends SecureTestUtil { public Object run() throws Exception { Get g = new Get(TEST_ROW); g.addFamily(family1); - Table t = TEST_UTIL.getConnection().getTable(tableName); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(tableName)) { t.get(g); - } finally { - t.close(); } return null; } @@ -1172,11 +1119,9 @@ public class TestAccessController extends SecureTestUtil { public Object run() throws Exception { Get g = new Get(TEST_ROW); g.addFamily(family2); - Table t = TEST_UTIL.getConnection().getTable(tableName); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(tableName)) { t.get(g); - } finally { - t.close(); } return null; } @@ -1188,11 +1133,9 @@ public class TestAccessController extends SecureTestUtil { Delete d = new Delete(TEST_ROW); d.deleteFamily(family1); d.deleteFamily(family2); - Table t = TEST_UTIL.getConnection().getTable(tableName); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(tableName)) { t.delete(d); - } finally { - t.close(); } return null; } @@ -1203,11 +1146,9 @@ public class TestAccessController extends SecureTestUtil { public Object run() throws Exception { Delete d = new Delete(TEST_ROW); d.deleteFamily(family1); - Table t = TEST_UTIL.getConnection().getTable(tableName); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(tableName)) { t.delete(d); - } finally { - t.close(); } return null; } @@ -1218,11 +1159,9 @@ public class TestAccessController extends SecureTestUtil { public Object run() throws Exception { Delete d = new Delete(TEST_ROW); d.deleteFamily(family2); - Table t = TEST_UTIL.getConnection().getTable(tableName); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(tableName)) { t.delete(d); - } finally { - t.close(); } return null; } @@ -1330,8 +1269,7 @@ public class TestAccessController extends SecureTestUtil { verifyDenied(gblUser, deleteActionAll, deleteAction1, deleteAction2); // delete table - admin.disableTable(tableName); - admin.deleteTable(tableName); + deleteTable(TEST_UTIL, tableName); } private boolean hasFoundUserPermission(UserPermission userPermission, List perms) { @@ -1349,14 +1287,12 @@ public class TestAccessController extends SecureTestUtil { // create table Admin admin = TEST_UTIL.getHBaseAdmin(); if (admin.tableExists(tableName)) { - admin.disableTable(tableName); - admin.deleteTable(tableName); + deleteTable(TEST_UTIL, tableName); } HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(family1)); htd.addFamily(new HColumnDescriptor(family2)); - admin.createTable(htd); - TEST_UTIL.waitUntilAllRegionsAssigned(tableName); + createTable(TEST_UTIL, htd); // create temp users User user = 
User.createUserForTesting(TEST_UTIL.getConfiguration(), "user", new String[0]); @@ -1366,11 +1302,9 @@ public class TestAccessController extends SecureTestUtil { public Object run() throws Exception { Get g = new Get(TEST_ROW); g.addColumn(family1, qualifier); - Table t = TEST_UTIL.getConnection().getTable(tableName); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(tableName)) { t.get(g); - } finally { - t.close(); } return null; } @@ -1381,11 +1315,9 @@ public class TestAccessController extends SecureTestUtil { public Object run() throws Exception { Put p = new Put(TEST_ROW); p.add(family1, qualifier, Bytes.toBytes("v1")); - Table t = TEST_UTIL.getConnection().getTable(tableName); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(tableName)) { t.put(p); - } finally { - t.close(); } return null; } @@ -1397,11 +1329,9 @@ public class TestAccessController extends SecureTestUtil { Delete d = new Delete(TEST_ROW); d.deleteColumn(family1, qualifier); // d.deleteFamily(family1); - Table t = TEST_UTIL.getConnection().getTable(tableName); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(tableName)) { t.delete(d); - } finally { - t.close(); } return null; } @@ -1449,8 +1379,7 @@ public class TestAccessController extends SecureTestUtil { verifyDenied(user, deleteQualifierAction); // delete table - admin.disableTable(tableName); - admin.deleteTable(tableName); + deleteTable(TEST_UTIL, tableName); } @Test @@ -1464,19 +1393,16 @@ public class TestAccessController extends SecureTestUtil { // create table Admin admin = TEST_UTIL.getHBaseAdmin(); if (admin.tableExists(tableName)) { - admin.disableTable(tableName); - admin.deleteTable(tableName); + deleteTable(TEST_UTIL, tableName); } HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(family1)); htd.addFamily(new HColumnDescriptor(family2)); htd.setOwner(USER_OWNER); - admin.createTable(htd); - TEST_UTIL.waitUntilAllRegionsAssigned(tableName); + createTable(TEST_UTIL, htd); List perms; - - Table acl = TEST_UTIL.getConnection().getTable(AccessControlLists.ACL_TABLE_NAME); + Table acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = @@ -1489,7 +1415,7 @@ public class TestAccessController extends SecureTestUtil { UserPermission ownerperm = new UserPermission( Bytes.toBytes(USER_OWNER.getName()), tableName, null, Action.values()); assertTrue("Owner should have all permissions on table", - hasFoundUserPermission(ownerperm, perms)); + hasFoundUserPermission(ownerperm, perms)); User user = User.createUserForTesting(TEST_UTIL.getConfiguration(), "user", new String[0]); byte[] userName = Bytes.toBytes(user.getShortName()); @@ -1497,13 +1423,13 @@ public class TestAccessController extends SecureTestUtil { UserPermission up = new UserPermission(userName, tableName, family1, qualifier, Permission.Action.READ); assertFalse("User should not be granted permission: " + up.toString(), - hasFoundUserPermission(up, perms)); + hasFoundUserPermission(up, perms)); // grant read permission grantOnTable(TEST_UTIL, user.getShortName(), - tableName, family1, qualifier, Permission.Action.READ); + tableName, family1, qualifier, Permission.Action.READ); - acl = TEST_UTIL.getConnection().getTable(AccessControlLists.ACL_TABLE_NAME); + acl = 
systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = @@ -1516,19 +1442,19 @@ public class TestAccessController extends SecureTestUtil { UserPermission upToVerify = new UserPermission( userName, tableName, family1, qualifier, Permission.Action.READ); assertTrue("User should be granted permission: " + upToVerify.toString(), - hasFoundUserPermission(upToVerify, perms)); + hasFoundUserPermission(upToVerify, perms)); upToVerify = new UserPermission( userName, tableName, family1, qualifier, Permission.Action.WRITE); assertFalse("User should not be granted permission: " + upToVerify.toString(), - hasFoundUserPermission(upToVerify, perms)); + hasFoundUserPermission(upToVerify, perms)); // grant read+write grantOnTable(TEST_UTIL, user.getShortName(), tableName, family1, qualifier, Permission.Action.WRITE, Permission.Action.READ); - acl = TEST_UTIL.getConnection().getTable(AccessControlLists.ACL_TABLE_NAME); + acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = @@ -1541,13 +1467,13 @@ public class TestAccessController extends SecureTestUtil { upToVerify = new UserPermission(userName, tableName, family1, qualifier, Permission.Action.WRITE, Permission.Action.READ); assertTrue("User should be granted permission: " + upToVerify.toString(), - hasFoundUserPermission(upToVerify, perms)); + hasFoundUserPermission(upToVerify, perms)); // revoke revokeFromTable(TEST_UTIL, user.getShortName(), tableName, family1, qualifier, - Permission.Action.WRITE, Permission.Action.READ); + Permission.Action.WRITE, Permission.Action.READ); - acl = TEST_UTIL.getConnection().getTable(AccessControlLists.ACL_TABLE_NAME); + acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = @@ -1558,7 +1484,7 @@ public class TestAccessController extends SecureTestUtil { } assertFalse("User should not be granted permission: " + upToVerify.toString(), - hasFoundUserPermission(upToVerify, perms)); + hasFoundUserPermission(upToVerify, perms)); // disable table before modification admin.disableTable(tableName); @@ -1567,7 +1493,7 @@ public class TestAccessController extends SecureTestUtil { htd.setOwner(newOwner); admin.modifyTable(tableName, htd); - acl = TEST_UTIL.getConnection().getTable(AccessControlLists.ACL_TABLE_NAME); + acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = @@ -1580,16 +1506,16 @@ public class TestAccessController extends SecureTestUtil { UserPermission newOwnerperm = new UserPermission( Bytes.toBytes(newOwner.getName()), tableName, null, Action.values()); assertTrue("New owner should have all permissions on table", - hasFoundUserPermission(newOwnerperm, perms)); + hasFoundUserPermission(newOwnerperm, perms)); // delete table - admin.deleteTable(tableName); + deleteTable(TEST_UTIL, tableName); } @Test public void testGlobalPermissionList() throws Exception { List perms; - Table acl = TEST_UTIL.getConnection().getTable(AccessControlLists.ACL_TABLE_NAME); + Table acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { 
BlockingRpcChannel service = acl.coprocessorService(HConstants.EMPTY_START_ROW); AccessControlService.BlockingInterface protocol = @@ -1611,63 +1537,6 @@ public class TestAccessController extends SecureTestUtil { verifyDenied(action, USER_CREATE, USER_RW, USER_NONE, USER_RO); } - public void checkGlobalPerms(Permission.Action... actions) throws IOException { - Permission[] perms = new Permission[actions.length]; - for (int i = 0; i < actions.length; i++) { - perms[i] = new Permission(actions[i]); - } - CheckPermissionsRequest.Builder request = CheckPermissionsRequest.newBuilder(); - for (Action a : actions) { - request.addPermission(AccessControlProtos.Permission.newBuilder() - .setType(AccessControlProtos.Permission.Type.Global) - .setGlobalPermission( - AccessControlProtos.GlobalPermission.newBuilder() - .addAction(ProtobufUtil.toPermissionAction(a)).build())); - } - Table acl = TEST_UTIL.getConnection().getTable(AccessControlLists.ACL_TABLE_NAME); - try { - BlockingRpcChannel channel = acl.coprocessorService(new byte[0]); - AccessControlService.BlockingInterface protocol = - AccessControlService.newBlockingStub(channel); - try { - protocol.checkPermissions(null, request.build()); - } catch (ServiceException se) { - ProtobufUtil.toIOException(se); - } - } finally { - acl.close(); - } - } - - public void checkTablePerms(TableName table, byte[] family, byte[] column, - Permission.Action... actions) throws IOException { - Permission[] perms = new Permission[actions.length]; - for (int i = 0; i < actions.length; i++) { - perms[i] = new TablePermission(table, family, column, actions[i]); - } - - checkTablePerms(table, perms); - } - - public void checkTablePerms(TableName table, Permission... perms) throws IOException { - CheckPermissionsRequest.Builder request = CheckPermissionsRequest.newBuilder(); - for (Permission p : perms) { - request.addPermission(ProtobufUtil.toPermission(p)); - } - Table acl = TEST_UTIL.getConnection().getTable(table); - try { - AccessControlService.BlockingInterface protocol = - AccessControlService.newBlockingStub(acl.coprocessorService(new byte[0])); - try { - protocol.checkPermissions(null, request.build()); - } catch (ServiceException se) { - ProtobufUtil.toIOException(se); - } - } finally { - acl.close(); - } - } - @Test public void testCheckPermissions() throws Exception { // -------------------------------------- @@ -1675,7 +1544,7 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction globalAdmin = new AccessTestAction() { @Override public Void run() throws Exception { - checkGlobalPerms(Permission.Action.ADMIN); + checkGlobalPerms(TEST_UTIL, Permission.Action.ADMIN); return null; } }; @@ -1687,7 +1556,7 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction globalReadWrite = new AccessTestAction() { @Override public Void run() throws Exception { - checkGlobalPerms(Permission.Action.READ, Permission.Action.WRITE); + checkGlobalPerms(TEST_UTIL, Permission.Action.READ, Permission.Action.WRITE); return null; } }; @@ -1716,7 +1585,8 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction tableRead = new AccessTestAction() { @Override public Void run() throws Exception { - checkTablePerms(TEST_TABLE.getTableName(), null, null, Permission.Action.READ); + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), null, null, + Permission.Action.READ); return null; } }; @@ -1724,7 +1594,8 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction columnRead = new AccessTestAction() 
{ @Override public Void run() throws Exception { - checkTablePerms(TEST_TABLE.getTableName(), TEST_FAMILY, null, Permission.Action.READ); + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), TEST_FAMILY, null, + Permission.Action.READ); return null; } }; @@ -1732,7 +1603,8 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction qualifierRead = new AccessTestAction() { @Override public Void run() throws Exception { - checkTablePerms(TEST_TABLE.getTableName(), TEST_FAMILY, TEST_Q1, Permission.Action.READ); + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), TEST_FAMILY, TEST_Q1, + Permission.Action.READ); return null; } }; @@ -1740,9 +1612,11 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction multiQualifierRead = new AccessTestAction() { @Override public Void run() throws Exception { - checkTablePerms(TEST_TABLE.getTableName(), new Permission[] { - new TablePermission(TEST_TABLE.getTableName(), TEST_FAMILY, TEST_Q1, Permission.Action.READ), - new TablePermission(TEST_TABLE.getTableName(), TEST_FAMILY, TEST_Q2, Permission.Action.READ), }); + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), new Permission[] { + new TablePermission(TEST_TABLE.getTableName(), TEST_FAMILY, TEST_Q1, + Permission.Action.READ), + new TablePermission(TEST_TABLE.getTableName(), TEST_FAMILY, TEST_Q2, + Permission.Action.READ), }); return null; } }; @@ -1750,8 +1624,10 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction globalAndTableRead = new AccessTestAction() { @Override public Void run() throws Exception { - checkTablePerms(TEST_TABLE.getTableName(), new Permission[] { new Permission(Permission.Action.READ), - new TablePermission(TEST_TABLE.getTableName(), null, (byte[]) null, Permission.Action.READ), }); + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), + new Permission[] { new Permission(Permission.Action.READ), + new TablePermission(TEST_TABLE.getTableName(), null, (byte[]) null, + Permission.Action.READ), }); return null; } }; @@ -1759,7 +1635,7 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction noCheck = new AccessTestAction() { @Override public Void run() throws Exception { - checkTablePerms(TEST_TABLE.getTableName(), new Permission[0]); + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), new Permission[0]); return null; } }; @@ -1785,8 +1661,8 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction familyReadWrite = new AccessTestAction() { @Override public Void run() throws Exception { - checkTablePerms(TEST_TABLE.getTableName(), TEST_FAMILY, null, Permission.Action.READ, - Permission.Action.WRITE); + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), TEST_FAMILY, null, + Permission.Action.READ, Permission.Action.WRITE); return null; } }; @@ -1804,7 +1680,7 @@ public class TestAccessController extends SecureTestUtil { .setTableName(ProtobufUtil.toProtoTableName(TEST_TABLE.getTableName())) .addAction(AccessControlProtos.Permission.Action.CREATE)) ).build(); - Table acl = TEST_UTIL.getConnection().getTable(AccessControlLists.ACL_TABLE_NAME); + Table acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel channel = acl.coprocessorService(new byte[0]); AccessControlService.BlockingInterface protocol = @@ -1947,7 +1823,7 @@ public class TestAccessController extends SecureTestUtil { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preSnapshot(ObserverContext.createAndPrepare(CP_ENV, null), - snapshot, 
htd); + snapshot, htd); return null; } }; @@ -2007,8 +1883,7 @@ public class TestAccessController extends SecureTestUtil { final Admin admin = TEST_UTIL.getHBaseAdmin(); HTableDescriptor htd = new HTableDescriptor(TEST_TABLE2); htd.addFamily(new HColumnDescriptor(TEST_FAMILY)); - admin.createTable(htd); - TEST_UTIL.waitUntilAllRegionsAssigned(TEST_TABLE2); + createTable(TEST_UTIL, htd); // Starting a new RegionServer. JVMClusterUtil.RegionServerThread newRsThread = hbaseCluster @@ -2017,13 +1892,13 @@ public class TestAccessController extends SecureTestUtil { // Move region to the new RegionServer. List regions; - try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(TEST_TABLE2)) { + try (RegionLocator locator = systemUserConnection.getRegionLocator(TEST_TABLE2)) { regions = locator.getAllRegionLocations(); } HRegionLocation location = regions.get(0); final HRegionInfo hri = location.getRegionInfo(); final ServerName server = location.getServerName(); - try (HTable table = (HTable)TEST_UTIL.getConnection().getTable(TEST_TABLE2)) { + try (HTable table = (HTable) systemUserConnection.getTable(TEST_TABLE2)) { AccessTestAction moveAction = new AccessTestAction() { @Override public Object run() throws Exception { @@ -2075,37 +1950,25 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction listTablesAction = new AccessTestAction() { @Override public Object run() throws Exception { - Connection unmanagedConnection = - ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Admin admin = unmanagedConnection.getAdmin(); - try { - admin.listTables(); - } finally { - admin.close(); - unmanagedConnection.close(); + try(Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + Admin admin = conn.getAdmin()) { + return Arrays.asList(admin.listTables()); } - return null; } }; AccessTestAction getTableDescAction = new AccessTestAction() { @Override public Object run() throws Exception { - Connection unmanagedConnection = - ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); - Admin admin = unmanagedConnection.getAdmin(); - try { - admin.getTableDescriptor(TEST_TABLE.getTableName()); - } finally { - admin.close(); - unmanagedConnection.close(); + try(Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + Admin admin = conn.getAdmin();) { + return admin.getTableDescriptor(TEST_TABLE.getTableName()); } - return null; } }; verifyAllowed(listTablesAction, SUPERUSER, USER_ADMIN, USER_CREATE, TABLE_ADMIN); - verifyDenied(listTablesAction, USER_RW, USER_RO, USER_NONE); + verifyIfEmptyList(listTablesAction, USER_RW, USER_RO, USER_NONE); verifyAllowed(getTableDescAction, SUPERUSER, USER_ADMIN, USER_CREATE, TABLE_ADMIN); verifyDenied(getTableDescAction, USER_RW, USER_RO, USER_NONE); @@ -2129,7 +1992,7 @@ public class TestAccessController extends SecureTestUtil { }; verifyAllowed(listTablesAction, SUPERUSER, USER_ADMIN, USER_CREATE, USER_RW, USER_RO); - verifyDenied(listTablesAction, USER_NONE); + verifyIfEmptyList(listTablesAction, USER_NONE); } @Test @@ -2147,8 +2010,7 @@ public class TestAccessController extends SecureTestUtil { Connection unmanagedConnection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); Admin admin = unmanagedConnection.getAdmin(); try { - admin.disableTable(TEST_TABLE.getTableName()); - admin.deleteTable(TEST_TABLE.getTableName()); + deleteTable(TEST_UTIL, admin, TEST_TABLE.getTableName()); } finally { admin.close(); unmanagedConnection.close(); @@ 
-2166,24 +2028,23 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction getAction = new AccessTestAction() { @Override public Object run() throws Exception { - Table t = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(TEST_TABLE.getTableName());) { return t.get(new Get(TEST_ROW)); - } finally { - t.close(); } } }; - verifyDenied(getAction, USER_NONE); + String namespace = TEST_TABLE.getTableName().getNamespaceAsString(); // Grant namespace READ to USER_NONE, this should supersede any table permissions - grantOnNamespace(TEST_UTIL, USER_NONE.getShortName(), - TEST_TABLE.getTableName().getNamespaceAsString(), - Permission.Action.READ); - - // Now USER_NONE should be able to read also + grantOnNamespace(TEST_UTIL, USER_NONE.getShortName(), namespace, Permission.Action.READ); + // Now USER_NONE should be able to read verifyAllowed(getAction, USER_NONE); + + // Revoke namespace READ to USER_NONE + revokeFromNamespace(TEST_UTIL, USER_NONE.getShortName(), namespace, Permission.Action.READ); + verifyDenied(getAction, USER_NONE); } @Test @@ -2193,11 +2054,9 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction getAction = new AccessTestAction() { @Override public Object run() throws Exception { - Table t = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(TEST_TABLE.getTableName());) { return t.get(new Get(TEST_ROW)); - } finally { - t.close(); } } }; @@ -2206,7 +2065,7 @@ public class TestAccessController extends SecureTestUtil { // Grant table READ permissions to testGrantRevoke. try { - grantOnTableUsingAccessControlClient(TEST_UTIL, conf, testGrantRevoke.getShortName(), + grantOnTableUsingAccessControlClient(TEST_UTIL, systemUserConnection, testGrantRevoke.getShortName(), TEST_TABLE.getTableName(), null, null, Permission.Action.READ); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.grant. ", e); @@ -2217,7 +2076,7 @@ public class TestAccessController extends SecureTestUtil { // Revoke table READ permission to testGrantRevoke. try { - revokeFromTableUsingAccessControlClient(TEST_UTIL, conf, testGrantRevoke.getShortName(), + revokeFromTableUsingAccessControlClient(TEST_UTIL, systemUserConnection, testGrantRevoke.getShortName(), TEST_TABLE.getTableName(), null, null, Permission.Action.READ); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.revoke ", e); @@ -2235,11 +2094,9 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction getAction = new AccessTestAction() { @Override public Object run() throws Exception { - Table t = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(TEST_TABLE.getTableName())) { return t.get(new Get(TEST_ROW)); - } finally { - t.close(); } } }; @@ -2248,8 +2105,8 @@ public class TestAccessController extends SecureTestUtil { // Grant table READ permissions to testGlobalGrantRevoke. 
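Aside, not part of the patch: the hunks above and below converge on one shape, a fresh short-lived Connection opened per action and closed by try-with-resources together with the Table, instead of reusing TEST_UTIL.getConnection() and closing the Table in a finally block. A minimal sketch of that shape, assuming only a Configuration and a TableName are in scope; the class name, method name and table name below are illustrative and not taken from the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TryWithResourcesSketch {
  // Open a short-lived Connection and Table; try-with-resources closes the
  // Table first and then the Connection, even if the get() throws.
  static Result readRow(Configuration conf, TableName table, byte[] row) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table t = conn.getTable(table)) {
      return t.get(new Get(row));
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Result r = readRow(conf, TableName.valueOf("testtable"), Bytes.toBytes("test"));
    System.out.println("cells returned: " + (r.isEmpty() ? 0 : r.listCells().size()));
  }
}

The resources close in reverse declaration order, Table first and Connection second, which matches what the removed finally blocks were doing by hand.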
try { - grantGlobalUsingAccessControlClient(TEST_UTIL, conf, testGlobalGrantRevoke.getShortName(), - Permission.Action.READ); + grantGlobalUsingAccessControlClient(TEST_UTIL, systemUserConnection, + testGlobalGrantRevoke.getShortName(), Permission.Action.READ); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.grant. ", e); } @@ -2259,8 +2116,8 @@ public class TestAccessController extends SecureTestUtil { // Revoke table READ permission to testGlobalGrantRevoke. try { - revokeGlobalUsingAccessControlClient(TEST_UTIL, conf, testGlobalGrantRevoke.getShortName(), - Permission.Action.READ); + revokeGlobalUsingAccessControlClient(TEST_UTIL, systemUserConnection, + testGlobalGrantRevoke.getShortName(), Permission.Action.READ); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.revoke ", e); } @@ -2276,11 +2133,9 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction getAction = new AccessTestAction() { @Override public Object run() throws Exception { - Table t = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(TEST_TABLE.getTableName());) { return t.get(new Get(TEST_ROW)); - } finally { - t.close(); } } }; @@ -2289,7 +2144,7 @@ public class TestAccessController extends SecureTestUtil { // Grant namespace READ to testNS, this should supersede any table permissions try { - grantOnNamespaceUsingAccessControlClient(TEST_UTIL, conf, testNS.getShortName(), + grantOnNamespaceUsingAccessControlClient(TEST_UTIL, systemUserConnection, testNS.getShortName(), TEST_TABLE.getTableName().getNamespaceAsString(), Permission.Action.READ); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.grant. 
", e); @@ -2300,7 +2155,7 @@ public class TestAccessController extends SecureTestUtil { // Revoke namespace READ to testNS, this should supersede any table permissions try { - revokeFromNamespaceUsingAccessControlClient(TEST_UTIL, conf, testNS.getShortName(), + revokeFromNamespaceUsingAccessControlClient(TEST_UTIL, systemUserConnection, testNS.getShortName(), TEST_TABLE.getTableName().getNamespaceAsString(), Permission.Action.READ); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.revoke ", e); @@ -2362,7 +2217,7 @@ public class TestAccessController extends SecureTestUtil { for (JVMClusterUtil.RegionServerThread thread: TEST_UTIL.getMiniHBaseCluster().getRegionServerThreads()) { HRegionServer rs = thread.getRegionServer(); - for (HRegion region: rs.getOnlineRegions(TEST_TABLE.getTableName())) { + for (Region region: rs.getOnlineRegions(TEST_TABLE.getTableName())) { region.getCoprocessorHost().load(PingCoprocessor.class, Coprocessor.PRIORITY_USER, conf); } @@ -2381,28 +2236,25 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction execEndpointAction = new AccessTestAction() { @Override public Object run() throws Exception { - Table t = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(TEST_TABLE.getTableName());) { BlockingRpcChannel service = t.coprocessorService(HConstants.EMPTY_BYTE_ARRAY); PingCoprocessor.newBlockingStub(service).noop(null, NoopRequest.newBuilder().build()); - } finally { - t.close(); } return null; } }; + String namespace = TEST_TABLE.getTableName().getNamespaceAsString(); + // Now grant EXEC to the entire namespace to user B + grantOnNamespace(TEST_UTIL, userB.getShortName(), namespace, Permission.Action.EXEC); + // User B should now be allowed also + verifyAllowed(execEndpointAction, userA, userB); + + revokeFromNamespace(TEST_UTIL, userB.getShortName(), namespace, Permission.Action.EXEC); // Verify that EXEC permission is checked correctly verifyDenied(execEndpointAction, userB); verifyAllowed(execEndpointAction, userA); - - // Now grant EXEC to the entire namespace to user B - grantOnNamespace(TEST_UTIL, userB.getShortName(), - TEST_TABLE.getTableName().getNamespaceAsString(), - Permission.Action.EXEC); - - // User B should now be allowed also - verifyAllowed(execEndpointAction, userA, userB); } @Test @@ -2410,16 +2262,14 @@ public class TestAccessController extends SecureTestUtil { AccessTestAction putWithReservedTag = new AccessTestAction() { @Override public Object run() throws Exception { - Table t = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(TEST_TABLE.getTableName());) { KeyValue kv = new KeyValue(TEST_ROW, TEST_FAMILY, TEST_QUALIFIER, HConstants.LATEST_TIMESTAMP, HConstants.EMPTY_BYTE_ARRAY, new Tag[] { new Tag(AccessControlLists.ACL_TAG_TYPE, ProtobufUtil.toUsersAndPermissions(USER_OWNER.getShortName(), new Permission(Permission.Action.READ)).toByteArray()) }); t.put(new Put(TEST_ROW).add(kv)); - } finally { - t.close(); } return null; } @@ -2496,33 +2346,33 @@ public class TestAccessController extends SecureTestUtil { @Test public void testGetNamespacePermission() throws Exception { - String namespace = "testNamespace"; + String namespace = "testGetNamespacePermission"; NamespaceDescriptor desc = NamespaceDescriptor.create(namespace).build(); - 
TEST_UTIL.getMiniHBaseCluster().getMaster().createNamespace(desc); + createNamespace(TEST_UTIL, desc); grantOnNamespace(TEST_UTIL, USER_NONE.getShortName(), namespace, Permission.Action.READ); try { - List namespacePermissions = AccessControlClient.getUserPermissions(conf, - AccessControlLists.toNamespaceEntry(namespace)); + List namespacePermissions = AccessControlClient.getUserPermissions( + systemUserConnection, AccessControlLists.toNamespaceEntry(namespace)); assertTrue(namespacePermissions != null); assertTrue(namespacePermissions.size() == 1); } catch (Throwable thw) { throw new HBaseException(thw); } - TEST_UTIL.getMiniHBaseCluster().getMaster().deleteNamespace(namespace); + deleteNamespace(TEST_UTIL, namespace); } @Test public void testTruncatePerms() throws Exception { try { - List existingPerms = AccessControlClient.getUserPermissions(conf, - TEST_TABLE.getTableName().getNameAsString()); + List existingPerms = AccessControlClient.getUserPermissions( + systemUserConnection, TEST_TABLE.getTableName().getNameAsString()); assertTrue(existingPerms != null); assertTrue(existingPerms.size() > 1); TEST_UTIL.getHBaseAdmin().disableTable(TEST_TABLE.getTableName()); TEST_UTIL.truncateTable(TEST_TABLE.getTableName()); TEST_UTIL.waitTableAvailable(TEST_TABLE.getTableName()); - List perms = AccessControlClient.getUserPermissions(conf, - TEST_TABLE.getTableName().getNameAsString()); + List perms = AccessControlClient.getUserPermissions( + systemUserConnection, TEST_TABLE.getTableName().getNameAsString()); assertTrue(perms != null); assertEquals(existingPerms.size(), perms.size()); } catch (Throwable e) { @@ -2534,8 +2384,8 @@ public class TestAccessController extends SecureTestUtil { return new PrivilegedAction>() { @Override public List run() { - try { - return AccessControlClient.getUserPermissions(conf, regex); + try(Connection conn = ConnectionFactory.createConnection(conf);) { + return AccessControlClient.getUserPermissions(conn, regex); } catch (Throwable e) { LOG.error("error during call of AccessControlClient.getUserPermissions.", e); return null; @@ -2575,18 +2425,16 @@ public class TestAccessController extends SecureTestUtil { Admin admin = TEST_UTIL.getHBaseAdmin(); HTableDescriptor htd = new HTableDescriptor(table1); htd.addFamily(new HColumnDescriptor(family)); - admin.createTable(htd); - TEST_UTIL.waitUntilAllRegionsAssigned(table1); + createTable(TEST_UTIL, htd); // creating the ns and table in it String ns = "testNamespace"; NamespaceDescriptor desc = NamespaceDescriptor.create(ns).build(); final TableName table2 = TableName.valueOf(ns, tableName); - TEST_UTIL.getMiniHBaseCluster().getMaster().createNamespace(desc); + createNamespace(TEST_UTIL, desc); htd = new HTableDescriptor(table2); htd.addFamily(new HColumnDescriptor(family)); - admin.createTable(htd); - TEST_UTIL.waitUntilAllRegionsAssigned(table2); + createTable(TEST_UTIL, htd); // Verify that we can read sys-tables String aclTableName = AccessControlLists.ACL_TABLE_NAME.getNameAsString(); @@ -2609,9 +2457,9 @@ public class TestAccessController extends SecureTestUtil { ns + TableName.NAMESPACE_DELIM + tableName)).size()); assertEquals(0, testRegexHandler.runAs(getPrivilegedAction("notMatchingAny")).size()); - TEST_UTIL.deleteTable(table1); - TEST_UTIL.deleteTable(table2); - TEST_UTIL.getMiniHBaseCluster().getMaster().deleteNamespace(ns); + deleteTable(TEST_UTIL, table1); + deleteTable(TEST_UTIL, table2); + deleteNamespace(TEST_UTIL, ns); } private void verifyAnyCreate(AccessTestAction action) throws Exception { diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java index f6066ad0452..24cd1d8d316 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java @@ -17,32 +17,43 @@ */ package org.apache.hadoop.hbase.security.access; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; +import java.util.Arrays; import java.util.List; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.access.Permission.Action; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.SecurityTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.TestTableName; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.junit.After; import org.junit.AfterClass; +import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; @@ -50,6 +61,7 @@ import org.junit.experimental.categories.Category; @Category({SecurityTests.class, LargeTests.class}) public class TestAccessController2 extends SecureTestUtil { + private static final Log LOG = LogFactory.getLog(TestAccessController2.class); private static final byte[] TEST_ROW = Bytes.toBytes("test"); private static final byte[] TEST_FAMILY = Bytes.toBytes("f"); @@ -59,7 +71,32 @@ public class TestAccessController2 extends SecureTestUtil { private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static Configuration conf; - @Rule public TestTableName TEST_TABLE = new TestTableName(); + /** The systemUserConnection created here is tied to the system user. In case, you are planning + * to create AccessTestAction, DON'T use this systemUserConnection as the 'doAs' user + * gets eclipsed by the system user. 
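The warning in the comment just above is the reason most actions in these tests build their own Connection inside the doAs block. A small sketch of the intended usage, under the assumption that a connection picks up the identity of the user that creates it; DoAsConnectionSketch, test_user and testtable are illustrative names, not identifiers from the patch.

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;

public class DoAsConnectionSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = HBaseConfiguration.create();
    User testUser = User.createUserForTesting(conf, "test_user", new String[0]);
    testUser.runAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        // The Connection is created inside runAs so RPCs carry the doAs
        // user's identity; a connection created beforehand by the system
        // user would keep the system user's credentials instead.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table t = conn.getTable(TableName.valueOf("testtable"))) {
          t.get(new Get(Bytes.toBytes("test")));
        }
        return null;
      }
    });
  }
}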
*/ + private static Connection systemUserConnection; + + private final static byte[] Q1 = Bytes.toBytes("q1"); + private final static byte[] value1 = Bytes.toBytes("value1"); + + private static byte[] TEST_FAMILY_2 = Bytes.toBytes("f2"); + private static byte[] TEST_ROW_2 = Bytes.toBytes("r2"); + private final static byte[] Q2 = Bytes.toBytes("q2"); + private final static byte[] value2 = Bytes.toBytes("value2"); + + private static byte[] TEST_ROW_3 = Bytes.toBytes("r3"); + + private static final String TESTGROUP_1 = "testgroup_1"; + private static final String TESTGROUP_2 = "testgroup_2"; + + private static User TESTGROUP1_USER1; + private static User TESTGROUP2_USER1; + + @Rule + public TestTableName TEST_TABLE = new TestTableName(); + private String namespace = "testNamespace"; + private String tname = namespace + ":testtable1"; + private TableName tableName = TableName.valueOf(tname); @BeforeClass public static void setupBeforeClass() throws Exception { @@ -70,14 +107,60 @@ public class TestAccessController2 extends SecureTestUtil { verifyConfiguration(conf); TEST_UTIL.startMiniCluster(); // Wait for the ACL table to become available - TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME); + TEST_UTIL.waitUntilAllRegionsAssigned(AccessControlLists.ACL_TABLE_NAME); + + TESTGROUP1_USER1 = + User.createUserForTesting(conf, "testgroup1_user1", new String[] { TESTGROUP_1 }); + TESTGROUP2_USER1 = + User.createUserForTesting(conf, "testgroup2_user2", new String[] { TESTGROUP_2 }); + + systemUserConnection = ConnectionFactory.createConnection(conf); + } + + @Before + public void setUp() throws Exception { + createNamespace(TEST_UTIL, NamespaceDescriptor.create(namespace).build()); + try (Table table = createTable(TEST_UTIL, tableName, + new byte[][] { TEST_FAMILY, TEST_FAMILY_2 })) { + TEST_UTIL.waitTableEnabled(tableName); + + // Ingesting test data. + table.put(Arrays.asList(new Put(TEST_ROW).addColumn(TEST_FAMILY, Q1, value1), + new Put(TEST_ROW_2).addColumn(TEST_FAMILY, Q2, value2), + new Put(TEST_ROW_3).addColumn(TEST_FAMILY_2, Q1, value1))); + } + + assertEquals(1, AccessControlLists.getTablePermissions(conf, tableName).size()); + try { + assertEquals(1, AccessControlClient.getUserPermissions(systemUserConnection, + tableName.toString()).size()); + } catch (Throwable e) { + LOG.error("Error during call of AccessControlClient.getUserPermissions. 
", e); + } + } @AfterClass public static void tearDownAfterClass() throws Exception { + systemUserConnection.close(); TEST_UTIL.shutdownMiniCluster(); } + @After + public void tearDown() throws Exception { + // Clean the _acl_ table + try { + deleteTable(TEST_UTIL, tableName); + } catch (TableNotFoundException ex) { + // Test deleted the table, no problem + LOG.info("Test deleted table " + tableName); + } + deleteNamespace(TEST_UTIL, namespace); + // Verify all table/namespace permissions are erased + assertEquals(0, AccessControlLists.getTablePermissions(conf, tableName).size()); + assertEquals(0, AccessControlLists.getNamespacePermissions(conf, namespace).size()); + } + @Test public void testCreateWithCorrectOwner() throws Exception { // Create a test user @@ -93,13 +176,13 @@ public class TestAccessController2 extends SecureTestUtil { try (Connection connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration(), testUser)) { try (Admin admin = connection.getAdmin()) { - admin.createTable(desc); + createTable(TEST_UTIL, admin, desc); } } return null; } }, testUser); - TEST_UTIL.waitTableEnabled(TEST_TABLE.getTableName()); + TEST_UTIL.waitTableAvailable(TEST_TABLE.getTableName()); // Verify that owner permissions have been granted to the test user on the // table just created List perms = @@ -165,13 +248,13 @@ public class TestAccessController2 extends SecureTestUtil { AccessTestAction writeAction = new AccessTestAction() { @Override public Object run() throws Exception { - Table t = TEST_UTIL.getConnection().getTable(AccessControlLists.ACL_TABLE_NAME); - try { + + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(AccessControlLists.ACL_TABLE_NAME)) { t.put(new Put(TEST_ROW).add(AccessControlLists.ACL_LIST_FAMILY, TEST_QUALIFIER, TEST_VALUE)); return null; } finally { - t.close(); } } }; @@ -188,8 +271,8 @@ public class TestAccessController2 extends SecureTestUtil { AccessTestAction scanAction = new AccessTestAction() { @Override public Object run() throws Exception { - Table t = TEST_UTIL.getConnection().getTable(AccessControlLists.ACL_TABLE_NAME); - try { + try(Connection conn = ConnectionFactory.createConnection(conf); + Table t = conn.getTable(AccessControlLists.ACL_TABLE_NAME)) { ResultScanner s = t.getScanner(new Scan()); try { for (Result r = s.next(); r != null; r = s.next()) { @@ -199,8 +282,6 @@ public class TestAccessController2 extends SecureTestUtil { s.close(); } return null; - } finally { - t.close(); } } }; @@ -213,4 +294,169 @@ public class TestAccessController2 extends SecureTestUtil { verifyAllowed(scanAction, superUser, globalRead); } + /* + * Test table scan operation at table, column family and column qualifier level. 
+ */ + @Test(timeout = 300000) + public void testPostGrantAndRevokeScanAction() throws Exception { + AccessTestAction scanTableActionForGroupWithTableLevelAccess = new AccessTestAction() { + @Override + public Void run() throws Exception { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName);) { + Scan s1 = new Scan(); + try (ResultScanner scanner1 = table.getScanner(s1);) { + Result[] next1 = scanner1.next(5); + assertTrue("User having table level access should be able to scan all " + + "the data in the table.", next1.length == 3); + } + } + return null; + } + }; + + AccessTestAction scanTableActionForGroupWithFamilyLevelAccess = new AccessTestAction() { + @Override + public Void run() throws Exception { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName);) { + Scan s1 = new Scan(); + try (ResultScanner scanner1 = table.getScanner(s1);) { + Result[] next1 = scanner1.next(5); + assertTrue("User having column family level access should be able to scan all " + + "the data belonging to that family.", next1.length == 2); + } + } + return null; + } + }; + + AccessTestAction scanFamilyActionForGroupWithFamilyLevelAccess = new AccessTestAction() { + @Override + public Void run() throws Exception { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName);) { + Scan s1 = new Scan(); + s1.addFamily(TEST_FAMILY_2); + try (ResultScanner scanner1 = table.getScanner(s1);) { + } + } + return null; + } + }; + + AccessTestAction scanTableActionForGroupWithQualifierLevelAccess = new AccessTestAction() { + @Override + public Void run() throws Exception { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName);) { + Scan s1 = new Scan(); + try (ResultScanner scanner1 = table.getScanner(s1);) { + Result[] next1 = scanner1.next(5); + assertTrue("User having column qualifier level access should be able to scan " + + "that column family qualifier data.", next1.length == 1); + } + } + return null; + } + }; + + AccessTestAction scanFamilyActionForGroupWithQualifierLevelAccess = new AccessTestAction() { + @Override + public Void run() throws Exception { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName);) { + Scan s1 = new Scan(); + s1.addFamily(TEST_FAMILY_2); + try (ResultScanner scanner1 = table.getScanner(s1);) { + } + } + return null; + } + }; + + AccessTestAction scanQualifierActionForGroupWithQualifierLevelAccess = new AccessTestAction() { + @Override + public Void run() throws Exception { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName);) { + Scan s1 = new Scan(); + s1.addColumn(TEST_FAMILY, Q2); + try (ResultScanner scanner1 = table.getScanner(s1);) { + } + } + return null; + } + }; + + // Verify user from a group which has table level access can read all the data and group which + // has no access can't read any data. + grantOnTable(TEST_UTIL, convertToGroup(TESTGROUP_1), tableName, null, null, Action.READ); + verifyAllowed(TESTGROUP1_USER1, scanTableActionForGroupWithTableLevelAccess); + verifyDenied(TESTGROUP2_USER1, scanTableActionForGroupWithTableLevelAccess); + + // Verify user from a group whose table level access has been revoked can't read any data. 
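The grant and revoke calls in this test address groups rather than individual users; in HBase ACLs a group principal is written with a leading '@', which is what the convertToGroup(...) helper used here presumably produces. A hedged sketch of the same table-level grant and revoke done directly through the Connection-based AccessControlClient API that this patch migrates to; the namespace, table and group names below are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GroupGrantSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testNamespace:testtable1");
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Group principals carry a leading '@'; granting READ at table level
      // lets every member of the group read the whole table.
      AccessControlClient.grant(conn, table, "@testgroup_1", null, null,
          Permission.Action.READ);
      // ... exercise the scan as a member of testgroup_1 here ...
      AccessControlClient.revoke(conn, table, "@testgroup_1", null, null,
          Permission.Action.READ);
    }
  }
}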
+ revokeFromTable(TEST_UTIL, convertToGroup(TESTGROUP_1), tableName, null, null); + verifyDenied(TESTGROUP1_USER1, scanTableActionForGroupWithTableLevelAccess); + + // Verify user from a group which has column family level access can read all the data + // belonging to that family and group which has no access can't read any data. + grantOnTable(TEST_UTIL, convertToGroup(TESTGROUP_1), tableName, TEST_FAMILY, null, + Permission.Action.READ); + verifyAllowed(TESTGROUP1_USER1, scanTableActionForGroupWithFamilyLevelAccess); + verifyDenied(TESTGROUP1_USER1, scanFamilyActionForGroupWithFamilyLevelAccess); + verifyDenied(TESTGROUP2_USER1, scanTableActionForGroupWithFamilyLevelAccess); + verifyDenied(TESTGROUP2_USER1, scanFamilyActionForGroupWithFamilyLevelAccess); + + // Verify user from a group whose column family level access has been revoked can't read any + // data from that family. + revokeFromTable(TEST_UTIL, convertToGroup(TESTGROUP_1), tableName, TEST_FAMILY, null); + verifyDenied(TESTGROUP1_USER1, scanTableActionForGroupWithFamilyLevelAccess); + + // Verify user from a group which has column qualifier level access can read data that has this + // family and qualifier, and group which has no access can't read any data. + grantOnTable(TEST_UTIL, convertToGroup(TESTGROUP_1), tableName, TEST_FAMILY, Q1, Action.READ); + verifyAllowed(TESTGROUP1_USER1, scanTableActionForGroupWithQualifierLevelAccess); + verifyDenied(TESTGROUP1_USER1, scanFamilyActionForGroupWithQualifierLevelAccess); + verifyDenied(TESTGROUP1_USER1, scanQualifierActionForGroupWithQualifierLevelAccess); + verifyDenied(TESTGROUP2_USER1, scanTableActionForGroupWithQualifierLevelAccess); + verifyDenied(TESTGROUP2_USER1, scanFamilyActionForGroupWithQualifierLevelAccess); + verifyDenied(TESTGROUP2_USER1, scanQualifierActionForGroupWithQualifierLevelAccess); + + // Verify user from a group whose column qualifier level access has been revoked can't read the + // data having this column family and qualifier. + revokeFromTable(TEST_UTIL, convertToGroup(TESTGROUP_1), tableName, TEST_FAMILY, Q1); + verifyDenied(TESTGROUP1_USER1, scanTableActionForGroupWithQualifierLevelAccess); + } + + @Test + public void testACLZNodeDeletion() throws Exception { + String baseAclZNode = "/hbase/acl/"; + String ns = "testACLZNodeDeletionNamespace"; + NamespaceDescriptor desc = NamespaceDescriptor.create(ns).build(); + createNamespace(TEST_UTIL, desc); + + final TableName table = TableName.valueOf(ns, "testACLZNodeDeletionTable"); + final byte[] family = Bytes.toBytes("f1"); + HTableDescriptor htd = new HTableDescriptor(table); + htd.addFamily(new HColumnDescriptor(family)); + createTable(TEST_UTIL, htd); + + // Namespace needs this, as they follow the lazy creation of ACL znode. 
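The znode assertions a few lines below boil down to ZKUtil.checkExists returning -1 once the ACL entry is gone. A compact sketch of that check, assuming the default /hbase/acl/ base znode and the '@' prefix for namespace entries that the test's convertToNamespace(...) helper presumably adds.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

public class AclZNodeCheckSketch {
  // True if the ACL znode for the table still exists under the default
  // "/hbase/acl/" base path assumed here.
  static boolean tableAclZNodeExists(ZooKeeperWatcher zkw, TableName table)
      throws KeeperException {
    // ZKUtil.checkExists returns the znode version, or -1 when it is absent.
    return ZKUtil.checkExists(zkw, "/hbase/acl/" + table.getNameAsString()) != -1;
  }

  // Namespace ACL znodes are assumed to use an '@' prefix, e.g. "/hbase/acl/@ns".
  static boolean namespaceAclZNodeExists(ZooKeeperWatcher zkw, String ns)
      throws KeeperException {
    return ZKUtil.checkExists(zkw, "/hbase/acl/@" + ns) != -1;
  }
}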
+ grantOnNamespace(TEST_UTIL, TESTGROUP1_USER1.getShortName(), ns, Action.ADMIN); + ZooKeeperWatcher zkw = TEST_UTIL.getMiniHBaseCluster().getMaster().getZooKeeper(); + assertTrue("The acl znode for table should exist", ZKUtil.checkExists(zkw, baseAclZNode + + table.getNameAsString()) != -1); + assertTrue("The acl znode for namespace should exist", ZKUtil.checkExists(zkw, baseAclZNode + + convertToNamespace(ns)) != -1); + + revokeFromNamespace(TEST_UTIL, TESTGROUP1_USER1.getShortName(), ns, Action.ADMIN); + deleteTable(TEST_UTIL, table); + deleteNamespace(TEST_UTIL, ns); + + assertTrue("The acl znode for table should have been deleted", + ZKUtil.checkExists(zkw, baseAclZNode + table.getNameAsString()) == -1); + assertTrue( "The acl znode for namespace should have been deleted", + ZKUtil.checkExists(zkw, baseAclZNode + convertToNamespace(ns)) == -1); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java index e239647d466..c7c49cd2f9f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLWithMultipleVersions.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; @@ -154,9 +153,8 @@ public class TestCellACLWithMultipleVersions extends SecureTestUtil { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { Put p; // with ro ACL p = new Put(TEST_ROW).add(TEST_FAMILY1, TEST_Q1, ZERO); @@ -175,9 +173,6 @@ public class TestCellACLWithMultipleVersions extends SecureTestUtil { p = new Put(TEST_ROW).add(TEST_FAMILY1, TEST_Q1, ZERO); p.setACL(USER_OTHER.getShortName(), new Permission(Permission.Action.WRITE)); t.put(p); - } finally { - t.close(); - connection.close(); } return null; } @@ -190,13 +185,9 @@ public class TestCellACLWithMultipleVersions extends SecureTestUtil { public Object run() throws Exception { Get get = new Get(TEST_ROW); get.setMaxVersions(10); - Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { return t.get(get).listCells(); - } finally { - t.close(); - connection.close(); } } }; @@ -206,13 +197,9 @@ public class TestCellACLWithMultipleVersions extends SecureTestUtil { public Object run() throws Exception { Get get = new Get(TEST_ROW); get.setMaxVersions(10); - Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = 
connection.getTable(TEST_TABLE.getTableName())) { return t.get(get).listCells(); - } finally { - t.close(); - connection.close(); } } }; @@ -225,9 +212,8 @@ public class TestCellACLWithMultipleVersions extends SecureTestUtil { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { Put p; p = new Put(TEST_ROW).add(TEST_FAMILY1, TEST_Q1, ZERO); p.setACL(USER_OTHER.getShortName(), new Permission(Permission.Action.WRITE)); @@ -238,9 +224,6 @@ public class TestCellACLWithMultipleVersions extends SecureTestUtil { p = new Put(TEST_ROW).add(TEST_FAMILY1, TEST_Q1, ZERO); p.setACL(USER_OTHER.getShortName(), new Permission(Permission.Action.WRITE)); t.put(p); - } finally { - t.close(); - connection.close(); } return null; } @@ -448,7 +431,7 @@ public class TestCellACLWithMultipleVersions extends SecureTestUtil { // The other put should be covered by the tombstone - verifyDenied(getQ2, USER_OTHER); + verifyIfNull(getQ2, USER_OTHER); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java index b7cbc5266a5..aed9bc24c43 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCellACLs.java @@ -147,9 +147,8 @@ public class TestCellACLs extends SecureTestUtil { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { Put p; // with ro ACL p = new Put(TEST_ROW).add(TEST_FAMILY, TEST_Q1, ZERO); @@ -164,9 +163,6 @@ public class TestCellACLs extends SecureTestUtil { .add(TEST_FAMILY, TEST_Q3, ZERO) .add(TEST_FAMILY, TEST_Q4, ZERO); t.put(p); - } finally { - t.close(); - connection.close(); } return null; } @@ -178,13 +174,9 @@ public class TestCellACLs extends SecureTestUtil { @Override public Object run() throws Exception { Get get = new Get(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q1); - Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { return t.get(get).listCells(); - } finally { - t.close(); - connection.close(); } } }; @@ -193,13 +185,9 @@ public class TestCellACLs extends SecureTestUtil { @Override public Object run() throws Exception { Get get = new Get(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q2); - Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { return t.get(get).listCells(); - } finally { - t.close(); - connection.close(); } } }; @@ -208,13 +196,9 @@ public class TestCellACLs extends SecureTestUtil { @Override public Object run() 
throws Exception { Get get = new Get(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q3); - Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { return t.get(get).listCells(); - } finally { - t.close(); - connection.close(); } } }; @@ -223,12 +207,9 @@ public class TestCellACLs extends SecureTestUtil { @Override public Object run() throws Exception { Get get = new Get(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q4); - Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { return t.get(get).listCells(); - } finally { - t.close(); } } }; @@ -240,8 +221,8 @@ public class TestCellACLs extends SecureTestUtil { // Confirm this access does not extend to other cells - verifyDenied(getQ3, USER_OTHER); - verifyDenied(getQ4, USER_OTHER); + verifyIfNull(getQ3, USER_OTHER); + verifyIfNull(getQ4, USER_OTHER); /* ---- Scans ---- */ @@ -291,13 +272,9 @@ public class TestCellACLs extends SecureTestUtil { @Override public Object run() throws Exception { Increment i = new Increment(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q1, 1L); - Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { t.increment(i); - } finally { - t.close(); - connection.close(); } return null; } @@ -307,13 +284,9 @@ public class TestCellACLs extends SecureTestUtil { @Override public Object run() throws Exception { Increment i = new Increment(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q2, 1L); - Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { t.increment(i); - } finally { - t.close(); - connection.close(); } return null; } @@ -325,13 +298,9 @@ public class TestCellACLs extends SecureTestUtil { Increment i = new Increment(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q2, 1L); // Tag this increment with an ACL that denies write permissions to USER_OTHER i.setACL(USER_OTHER.getShortName(), new Permission(Action.READ)); - Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { t.increment(i); - } finally { - t.close(); - connection.close(); } return null; } @@ -341,13 +310,9 @@ public class TestCellACLs extends SecureTestUtil { @Override public Object run() throws Exception { Increment i = new Increment(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q3, 1L); - Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { t.increment(i); - } finally { - t.close(); - connection.close(); } return null; } @@ -370,13 +335,9 @@ 
public class TestCellACLs extends SecureTestUtil { @Override public Object run() throws Exception { Delete delete = new Delete(TEST_ROW).deleteFamily(TEST_FAMILY); - Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { t.delete(delete); - } finally { - t.close(); - connection.close(); } return null; } @@ -386,13 +347,9 @@ public class TestCellACLs extends SecureTestUtil { @Override public Object run() throws Exception { Delete delete = new Delete(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q1); - Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { t.delete(delete); - } finally { - t.close(); - connection.close(); } return null; } @@ -419,13 +376,11 @@ public class TestCellACLs extends SecureTestUtil { verifyDenied(new AccessTestAction() { @Override public Object run() throws Exception { - Table t = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { Put p; p = new Put(TEST_ROW).add(TEST_FAMILY, TEST_Q1, ZERO); t.put(p); - } finally { - t.close(); } return null; } @@ -435,13 +390,11 @@ public class TestCellACLs extends SecureTestUtil { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - Table t = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { Put p; p = new Put(TEST_ROW).add(TEST_FAMILY, TEST_Q1, ZERO); t.put(p); - } finally { - t.close(); } return null; } @@ -451,13 +404,11 @@ public class TestCellACLs extends SecureTestUtil { verifyDenied(new AccessTestAction() { @Override public Object run() throws Exception { - Table t = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { Put p; p = new Put(TEST_ROW).add(TEST_FAMILY, TEST_Q1, ONE); t.put(p); - } finally { - t.close(); } return null; } @@ -467,11 +418,9 @@ public class TestCellACLs extends SecureTestUtil { verifyAllowed(new AccessTestAction() { @Override public Object run() throws Exception { - Table t = TEST_UTIL.getConnection().getTable(TEST_TABLE.getTableName()); - try { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { return t.get(new Get(TEST_ROW).addColumn(TEST_FAMILY, TEST_Q1)); - } finally { - t.close(); } } }, USER_OTHER); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java index 887b56d19c6..457bb3bd903 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.client.Admin; import 
org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; @@ -209,16 +208,16 @@ public class TestNamespaceCommands extends SecureTestUtil { SUPERUSER, USER_GLOBAL_ADMIN); - verifyDeniedWithException(modifyNamespace, - USER_GLOBAL_CREATE, - USER_GLOBAL_WRITE, - USER_GLOBAL_READ, - USER_GLOBAL_EXEC, - USER_NS_ADMIN, - USER_NS_CREATE, - USER_NS_WRITE, - USER_NS_READ, - USER_NS_EXEC); + verifyDenied(modifyNamespace, + USER_GLOBAL_CREATE, + USER_GLOBAL_WRITE, + USER_GLOBAL_READ, + USER_GLOBAL_EXEC, + USER_NS_ADMIN, + USER_NS_CREATE, + USER_NS_WRITE, + USER_NS_READ, + USER_NS_EXEC); } @Test @@ -247,7 +246,7 @@ public class TestNamespaceCommands extends SecureTestUtil { USER_GLOBAL_ADMIN); // all others should be denied - verifyDeniedWithException(createNamespace, + verifyDenied(createNamespace, USER_GLOBAL_CREATE, USER_GLOBAL_WRITE, USER_GLOBAL_READ, @@ -265,18 +264,18 @@ public class TestNamespaceCommands extends SecureTestUtil { SUPERUSER, USER_GLOBAL_ADMIN); - verifyDeniedWithException(deleteNamespace, - USER_GLOBAL_CREATE, - USER_GLOBAL_WRITE, - USER_GLOBAL_READ, - USER_GLOBAL_EXEC, - USER_NS_ADMIN, - USER_NS_CREATE, - USER_NS_WRITE, - USER_NS_READ, - USER_NS_EXEC, - USER_TABLE_CREATE, - USER_TABLE_WRITE); + verifyDenied(deleteNamespace, + USER_GLOBAL_CREATE, + USER_GLOBAL_WRITE, + USER_GLOBAL_READ, + USER_GLOBAL_EXEC, + USER_NS_ADMIN, + USER_NS_CREATE, + USER_NS_WRITE, + USER_NS_READ, + USER_NS_EXEC, + USER_TABLE_CREATE, + USER_TABLE_WRITE); } @Test @@ -295,17 +294,17 @@ public class TestNamespaceCommands extends SecureTestUtil { USER_GLOBAL_ADMIN, USER_NS_ADMIN); - verifyDeniedWithException(getNamespaceAction, - USER_GLOBAL_CREATE, - USER_GLOBAL_WRITE, - USER_GLOBAL_READ, - USER_GLOBAL_EXEC, - USER_NS_CREATE, - USER_NS_WRITE, - USER_NS_READ, - USER_NS_EXEC, - USER_TABLE_CREATE, - USER_TABLE_WRITE); + verifyDenied(getNamespaceAction, + USER_GLOBAL_CREATE, + USER_GLOBAL_WRITE, + USER_GLOBAL_READ, + USER_GLOBAL_EXEC, + USER_NS_CREATE, + USER_NS_WRITE, + USER_NS_READ, + USER_NS_EXEC, + USER_TABLE_CREATE, + USER_TABLE_WRITE); } @Test @@ -416,52 +415,52 @@ public class TestNamespaceCommands extends SecureTestUtil { SUPERUSER, USER_GLOBAL_ADMIN); - verifyDeniedWithException(grantAction, - USER_GLOBAL_CREATE, - USER_GLOBAL_WRITE, - USER_GLOBAL_READ, - USER_GLOBAL_EXEC, - USER_NS_ADMIN, - USER_NS_CREATE, - USER_NS_WRITE, - USER_NS_READ, - USER_NS_EXEC, - USER_TABLE_CREATE, - USER_TABLE_WRITE); + verifyDenied(grantAction, + USER_GLOBAL_CREATE, + USER_GLOBAL_WRITE, + USER_GLOBAL_READ, + USER_GLOBAL_EXEC, + USER_NS_ADMIN, + USER_NS_CREATE, + USER_NS_WRITE, + USER_NS_READ, + USER_NS_EXEC, + USER_TABLE_CREATE, + USER_TABLE_WRITE); verifyAllowed(revokeAction, SUPERUSER, USER_GLOBAL_ADMIN); - verifyDeniedWithException(revokeAction, - USER_GLOBAL_CREATE, - USER_GLOBAL_WRITE, - USER_GLOBAL_READ, - USER_GLOBAL_EXEC, - USER_NS_ADMIN, - USER_NS_CREATE, - USER_NS_WRITE, - USER_NS_READ, - USER_NS_EXEC, - USER_TABLE_CREATE, - USER_TABLE_WRITE); + verifyDenied(revokeAction, + USER_GLOBAL_CREATE, + USER_GLOBAL_WRITE, + USER_GLOBAL_READ, + USER_GLOBAL_EXEC, + USER_NS_ADMIN, + USER_NS_CREATE, + USER_NS_WRITE, + USER_NS_READ, + USER_NS_EXEC, + USER_TABLE_CREATE, + USER_TABLE_WRITE); verifyAllowed(getPermissionsAction, 
SUPERUSER, USER_GLOBAL_ADMIN, USER_NS_ADMIN); - verifyDeniedWithException(getPermissionsAction, - USER_GLOBAL_CREATE, - USER_GLOBAL_WRITE, - USER_GLOBAL_READ, - USER_GLOBAL_EXEC, - USER_NS_CREATE, - USER_NS_WRITE, - USER_NS_READ, - USER_NS_EXEC, - USER_TABLE_CREATE, - USER_TABLE_WRITE); + verifyDenied(getPermissionsAction, + USER_GLOBAL_CREATE, + USER_GLOBAL_WRITE, + USER_GLOBAL_READ, + USER_GLOBAL_EXEC, + USER_NS_CREATE, + USER_NS_WRITE, + USER_NS_READ, + USER_NS_EXEC, + USER_TABLE_CREATE, + USER_TABLE_WRITE); } @Test @@ -482,16 +481,16 @@ public class TestNamespaceCommands extends SecureTestUtil { USER_GLOBAL_CREATE, USER_NS_CREATE); - verifyDeniedWithException(createTable, - USER_GLOBAL_ADMIN, - USER_GLOBAL_WRITE, - USER_GLOBAL_READ, - USER_GLOBAL_EXEC, - USER_NS_ADMIN, - USER_NS_WRITE, - USER_NS_READ, - USER_NS_EXEC, - USER_TABLE_CREATE, - USER_TABLE_WRITE); + verifyDenied(createTable, + USER_GLOBAL_ADMIN, + USER_GLOBAL_WRITE, + USER_GLOBAL_READ, + USER_GLOBAL_EXEC, + USER_NS_ADMIN, + USER_NS_WRITE, + USER_NS_READ, + USER_NS_EXEC, + USER_TABLE_CREATE, + USER_TABLE_WRITE); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java index e1dfdbf7629..6002a4d861a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestScanEarlyTermination.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; @@ -229,7 +228,7 @@ public class TestScanEarlyTermination extends SecureTestUtil { }, USER_OTHER); // A scan of FAMILY2 will throw an AccessDeniedException - verifyDeniedWithException(new AccessTestAction() { + verifyDenied(new AccessTestAction() { @Override public Object run() throws Exception { // force a new RS connection diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java new file mode 100644 index 00000000000..7c2cb281f67 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestWithDisabledAuthorization.java @@ -0,0 +1,1079 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.security.access; + +import static org.junit.Assert.*; +import static org.mockito.Mockito.*; + +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment; +import org.apache.hadoop.hbase.filter.BinaryComparator; +import org.apache.hadoop.hbase.filter.CompareFilter; +import org.apache.hadoop.hbase.master.MasterCoprocessorHost; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; +import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; +import org.apache.hadoop.hbase.regionserver.RegionScanner; +import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost; +import org.apache.hadoop.hbase.regionserver.wal.WALEdit; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.access.Permission.Action; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.SecurityTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.TestTableName; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.common.collect.Lists; + +@Category({SecurityTests.class, LargeTests.class}) +public class TestWithDisabledAuthorization extends SecureTestUtil { + private static final Log LOG = LogFactory.getLog(TestWithDisabledAuthorization.class); + + static { + Logger.getLogger(AccessController.class).setLevel(Level.TRACE); + Logger.getLogger(AccessControlFilter.class).setLevel(Level.TRACE); + 
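+ // This test loads the AccessController coprocessor but turns off active enforcement in
+ // setupBeforeClass() (User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY set to false): the explicit
+ // permission probes below still reflect grants and revocations, while the actual operations
+ // and the master / region-server / region hooks are expected to succeed for every user.
+ // TRACE logging on the authorization classes makes those passive code paths visible.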
Logger.getLogger(TableAuthManager.class).setLevel(Level.TRACE); + } + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + private static final byte[] TEST_FAMILY = Bytes.toBytes("f1"); + private static final byte[] TEST_FAMILY2 = Bytes.toBytes("f2"); + private static final byte[] TEST_ROW = Bytes.toBytes("testrow"); + private static final byte[] TEST_Q1 = Bytes.toBytes("q1"); + private static final byte[] TEST_Q2 = Bytes.toBytes("q2"); + private static final byte[] TEST_Q3 = Bytes.toBytes("q3"); + private static final byte[] TEST_Q4 = Bytes.toBytes("q4"); + private static final byte[] ZERO = Bytes.toBytes(0L); + + private static MasterCoprocessorEnvironment CP_ENV; + private static AccessController ACCESS_CONTROLLER; + private static RegionServerCoprocessorEnvironment RSCP_ENV; + private RegionCoprocessorEnvironment RCP_ENV; + + @Rule public TestTableName TEST_TABLE = new TestTableName(); + + // default users + + // superuser + private static User SUPERUSER; + // user granted with all global permission + private static User USER_ADMIN; + // user with rw permissions on column family. + private static User USER_RW; + // user with read-only permissions + private static User USER_RO; + // user is table owner. will have all permissions on table + private static User USER_OWNER; + // user with create table permissions alone + private static User USER_CREATE; + // user with no permissions + private static User USER_NONE; + // user with only partial read-write perms (on family:q1 only) + private static User USER_QUAL; + + @BeforeClass + public static void setupBeforeClass() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + // Enable security + enableSecurity(conf); + // We expect 0.98 cell ACL semantics + conf.setBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, false); + // Enable EXEC permission checking + conf.setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true); + // Verify enableSecurity sets up what we require + verifyConfiguration(conf); + + // Now, DISABLE only active authorization + conf.setBoolean(User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY, false); + + // Start the minicluster + TEST_UTIL.startMiniCluster(); + MasterCoprocessorHost cpHost = + TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost(); + cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf); + ACCESS_CONTROLLER = (AccessController) cpHost.findCoprocessor(AccessController.class.getName()); + CP_ENV = cpHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER, + Coprocessor.PRIORITY_HIGHEST, 1, conf); + RegionServerCoprocessorHost rsHost = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) + .getRegionServerCoprocessorHost(); + RSCP_ENV = rsHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER, + Coprocessor.PRIORITY_HIGHEST, 1, conf); + + // Wait for the ACL table to become available + TEST_UTIL.waitUntilAllRegionsAssigned(AccessControlLists.ACL_TABLE_NAME); + + // create a set of test users + SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" }); + USER_ADMIN = User.createUserForTesting(conf, "admin2", new String[0]); + USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]); + USER_CREATE = User.createUserForTesting(conf, "tbl_create", new String[0]); + USER_RW = User.createUserForTesting(conf, "rwuser", new String[0]); + USER_RO = User.createUserForTesting(conf, "rouser", new String[0]); + USER_QUAL = User.createUserForTesting(conf, "rwpartial", new 
String[0]); + USER_NONE = User.createUserForTesting(conf, "nouser", new String[0]); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void setUp() throws Exception { + // Create the test table (owner added to the _acl_ table) + Admin admin = TEST_UTIL.getHBaseAdmin(); + HTableDescriptor htd = new HTableDescriptor(TEST_TABLE.getTableName()); + HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY); + hcd.setMaxVersions(100); + htd.addFamily(hcd); + htd.setOwner(USER_OWNER); + admin.createTable(htd, new byte[][] { Bytes.toBytes("s") }); + TEST_UTIL.waitUntilAllRegionsAssigned(TEST_TABLE.getTableName()); + + Region region = TEST_UTIL.getHBaseCluster().getRegions(TEST_TABLE.getTableName()).get(0); + RegionCoprocessorHost rcpHost = region.getCoprocessorHost(); + RCP_ENV = rcpHost.createEnvironment(AccessController.class, ACCESS_CONTROLLER, + Coprocessor.PRIORITY_HIGHEST, 1, TEST_UTIL.getConfiguration()); + + // Set up initial grants + + grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(), + Permission.Action.ADMIN, + Permission.Action.CREATE, + Permission.Action.READ, + Permission.Action.WRITE); + + grantOnTable(TEST_UTIL, USER_RW.getShortName(), + TEST_TABLE.getTableName(), TEST_FAMILY, null, + Permission.Action.READ, + Permission.Action.WRITE); + + // USER_CREATE is USER_RW plus CREATE permissions + grantOnTable(TEST_UTIL, USER_CREATE.getShortName(), + TEST_TABLE.getTableName(), null, null, + Permission.Action.CREATE, + Permission.Action.READ, + Permission.Action.WRITE); + + grantOnTable(TEST_UTIL, USER_RO.getShortName(), + TEST_TABLE.getTableName(), TEST_FAMILY, null, + Permission.Action.READ); + + grantOnTable(TEST_UTIL, USER_QUAL.getShortName(), + TEST_TABLE.getTableName(), TEST_FAMILY, TEST_Q1, + Permission.Action.READ, + Permission.Action.WRITE); + + assertEquals(5, AccessControlLists.getTablePermissions(TEST_UTIL.getConfiguration(), + TEST_TABLE.getTableName()).size()); + } + + @After + public void tearDown() throws Exception { + // Clean the _acl_ table + try { + deleteTable(TEST_UTIL, TEST_TABLE.getTableName()); + } catch (TableNotFoundException ex) { + // Test deleted the table, no problem + LOG.info("Test deleted table " + TEST_TABLE.getTableName()); + } + // Verify all table/namespace permissions are erased + assertEquals(0, AccessControlLists.getTablePermissions(TEST_UTIL.getConfiguration(), + TEST_TABLE.getTableName()).size()); + assertEquals(0, AccessControlLists.getNamespacePermissions(TEST_UTIL.getConfiguration(), + TEST_TABLE.getTableName().getNamespaceAsString()).size()); + } + + @Test + public void testCheckPermissions() throws Exception { + + AccessTestAction checkGlobalAdmin = new AccessTestAction() { + @Override + public Void run() throws Exception { + checkGlobalPerms(TEST_UTIL, Permission.Action.ADMIN); + return null; + } + }; + + verifyAllowed(checkGlobalAdmin, SUPERUSER, USER_ADMIN); + verifyDenied(checkGlobalAdmin, USER_OWNER, USER_CREATE, USER_RW, USER_RO, USER_QUAL, + USER_NONE); + + AccessTestAction checkGlobalRead = new AccessTestAction() { + @Override + public Void run() throws Exception { + checkGlobalPerms(TEST_UTIL, Permission.Action.READ); + return null; + } + }; + + verifyAllowed(checkGlobalRead, SUPERUSER, USER_ADMIN); + verifyDenied(checkGlobalRead, USER_OWNER, USER_CREATE, USER_RW, USER_RO, USER_QUAL, + USER_NONE); + + AccessTestAction checkGlobalReadWrite = new AccessTestAction() { + @Override + public Void run() throws Exception { + checkGlobalPerms(TEST_UTIL, 
Permission.Action.READ, Permission.Action.WRITE); + return null; + } + }; + + verifyAllowed(checkGlobalReadWrite, SUPERUSER, USER_ADMIN); + verifyDenied(checkGlobalReadWrite, USER_OWNER, USER_CREATE, USER_RW, USER_RO, USER_QUAL, + USER_NONE); + + AccessTestAction checkTableAdmin = new AccessTestAction() { + @Override + public Void run() throws Exception { + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), null, null, + Permission.Action.ADMIN); + return null; + } + }; + + verifyAllowed(checkTableAdmin, SUPERUSER, USER_ADMIN, USER_OWNER); + verifyDenied(checkTableAdmin, USER_CREATE, USER_RW, USER_RO, USER_QUAL, USER_NONE); + + AccessTestAction checkTableCreate = new AccessTestAction() { + @Override + public Void run() throws Exception { + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), null, null, + Permission.Action.CREATE); + return null; + } + }; + + verifyAllowed(checkTableCreate, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE); + verifyDenied(checkTableCreate, USER_RW, USER_RO, USER_QUAL, USER_NONE); + + AccessTestAction checkTableRead = new AccessTestAction() { + @Override + public Void run() throws Exception { + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), null, null, + Permission.Action.READ); + return null; + } + }; + + verifyAllowed(checkTableRead, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE); + verifyDenied(checkTableRead, USER_RW, USER_RO, USER_QUAL, USER_NONE); + + AccessTestAction checkTableReadWrite = new AccessTestAction() { + @Override + public Void run() throws Exception { + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), null, null, + Permission.Action.READ, Permission.Action.WRITE); + return null; + } + }; + + verifyAllowed(checkTableReadWrite, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE); + verifyDenied(checkTableReadWrite, USER_RW, USER_RO, USER_QUAL, USER_NONE); + + AccessTestAction checkColumnRead = new AccessTestAction() { + @Override + public Void run() throws Exception { + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), TEST_FAMILY, null, + Permission.Action.READ); + return null; + } + }; + + verifyAllowed(checkColumnRead, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, USER_RW, + USER_RO); + verifyDenied(checkColumnRead, USER_QUAL, USER_NONE); + + AccessTestAction checkColumnReadWrite = new AccessTestAction() { + @Override + public Void run() throws Exception { + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), TEST_FAMILY, null, + Permission.Action.READ, Permission.Action.WRITE); + return null; + } + }; + + verifyAllowed(checkColumnReadWrite, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, + USER_RW); + verifyDenied(checkColumnReadWrite, USER_RO, USER_QUAL, USER_NONE); + + AccessTestAction checkQualifierRead = new AccessTestAction() { + @Override + public Void run() throws Exception { + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), TEST_FAMILY, TEST_Q1, + Permission.Action.READ); + return null; + } + }; + + verifyAllowed(checkQualifierRead, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, USER_RW, + USER_RO, USER_QUAL); + verifyDenied(checkQualifierRead, USER_NONE); + + AccessTestAction checkQualifierReadWrite = new AccessTestAction() { + @Override + public Void run() throws Exception { + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), TEST_FAMILY, TEST_Q1, + Permission.Action.READ, Permission.Action.WRITE); + return null; + } + }; + + verifyAllowed(checkQualifierReadWrite, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, + USER_RW, USER_QUAL); + verifyDenied(checkQualifierReadWrite, USER_RO, USER_NONE); + + 
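+ // The last two probes in this test use the Permission[] overload of checkTablePerms to
+ // check READ (and then READ+WRITE) on TEST_Q1 and TEST_Q2 in a single call.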
AccessTestAction checkMultiQualifierRead = new AccessTestAction() { + @Override + public Void run() throws Exception { + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), new Permission[] { + new TablePermission(TEST_TABLE.getTableName(), TEST_FAMILY, TEST_Q1, + Permission.Action.READ), + new TablePermission(TEST_TABLE.getTableName(), TEST_FAMILY, TEST_Q2, + Permission.Action.READ), }); + return null; + } + }; + + verifyAllowed(checkMultiQualifierRead, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, + USER_RW, USER_RO); + verifyDenied(checkMultiQualifierRead, USER_QUAL, USER_NONE); + + AccessTestAction checkMultiQualifierReadWrite = new AccessTestAction() { + @Override + public Void run() throws Exception { + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), new Permission[] { + new TablePermission(TEST_TABLE.getTableName(), TEST_FAMILY, TEST_Q1, + Permission.Action.READ, Permission.Action.WRITE), + new TablePermission(TEST_TABLE.getTableName(), TEST_FAMILY, TEST_Q2, + Permission.Action.READ, Permission.Action.WRITE), }); + return null; + } + }; + + verifyAllowed(checkMultiQualifierReadWrite, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, + USER_RW); + verifyDenied(checkMultiQualifierReadWrite, USER_RO, USER_QUAL, USER_NONE); + } + + /** Test grants and revocations with authorization disabled */ + @Test + public void testPassiveGrantRevoke() throws Exception { + + // Add a test user + + User tblUser = User.createUserForTesting(TEST_UTIL.getConfiguration(), "tbluser", + new String[0]); + + // If we check now, the test user won't have permissions + + AccessTestAction checkTableRead = new AccessTestAction() { + @Override + public Void run() throws Exception { + checkTablePerms(TEST_UTIL, TEST_TABLE.getTableName(), TEST_FAMILY, null, + Permission.Action.READ); + return null; + } + }; + + verifyDenied(tblUser, checkTableRead); + + // An actual read won't be denied + + AccessTestAction tableRead = new AccessTestAction() { + @Override + public Void run() throws Exception { + try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + Table t = conn.getTable(TEST_TABLE.getTableName())) { + t.get(new Get(TEST_ROW).addFamily(TEST_FAMILY)); + } + return null; + } + }; + + verifyAllowed(tblUser, tableRead); + + // Grant read perms to the test user + + grantOnTable(TEST_UTIL, tblUser.getShortName(), TEST_TABLE.getTableName(), TEST_FAMILY, + null, Permission.Action.READ); + + // Now both the permission check and actual op will succeed + + verifyAllowed(tblUser, checkTableRead); + verifyAllowed(tblUser, tableRead); + + // Revoke read perms from the test user + + revokeFromTable(TEST_UTIL, tblUser.getShortName(), TEST_TABLE.getTableName(), TEST_FAMILY, + null, Permission.Action.READ); + + // Now the permission check will indicate revocation but the actual op will still succeed + + verifyDenied(tblUser, checkTableRead); + verifyAllowed(tblUser, tableRead); + } + + /** Test master observer */ + @Test + public void testPassiveMasterOperations() throws Exception { + + // preCreateTable + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + HTableDescriptor htd = new HTableDescriptor(TEST_TABLE.getTableName()); + htd.addFamily(new HColumnDescriptor(TEST_FAMILY)); + ACCESS_CONTROLLER.preCreateTable(ObserverContext.createAndPrepare(CP_ENV, null), htd, + null); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preModifyTable + verifyAllowed(new AccessTestAction() { 
+ @Override + public Object run() throws Exception { + HTableDescriptor htd = new HTableDescriptor(TEST_TABLE.getTableName()); + htd.addFamily(new HColumnDescriptor(TEST_FAMILY)); + htd.addFamily(new HColumnDescriptor(TEST_FAMILY2)); + ACCESS_CONTROLLER.preModifyTable(ObserverContext.createAndPrepare(CP_ENV, null), + TEST_TABLE.getTableName(), htd); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preDeleteTable + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preDeleteTable(ObserverContext.createAndPrepare(CP_ENV, null), + TEST_TABLE.getTableName()); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preTruncateTable + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preTruncateTable(ObserverContext.createAndPrepare(CP_ENV, null), + TEST_TABLE.getTableName()); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preAddColumn + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY2); + ACCESS_CONTROLLER.preAddColumn(ObserverContext.createAndPrepare(CP_ENV, null), + TEST_TABLE.getTableName(), hcd); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preModifyColumn + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY2); + ACCESS_CONTROLLER.preModifyColumn(ObserverContext.createAndPrepare(CP_ENV, null), + TEST_TABLE.getTableName(), hcd); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preDeleteColumn + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preDeleteColumn(ObserverContext.createAndPrepare(CP_ENV, null), + TEST_TABLE.getTableName(), TEST_FAMILY2); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preEnableTable + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preEnableTable(ObserverContext.createAndPrepare(CP_ENV, null), + TEST_TABLE.getTableName()); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preDisableTable + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preDisableTable(ObserverContext.createAndPrepare(CP_ENV, null), + TEST_TABLE.getTableName()); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preMove + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + HRegionInfo region = new HRegionInfo(TEST_TABLE.getTableName()); + ServerName srcServer = ServerName.valueOf("1.1.1.1", 1, 0); + ServerName destServer = ServerName.valueOf("2.2.2.2", 2, 0); + ACCESS_CONTROLLER.preMove(ObserverContext.createAndPrepare(CP_ENV, null), region, + srcServer, destServer); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // 
preAssign + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + HRegionInfo region = new HRegionInfo(TEST_TABLE.getTableName()); + ACCESS_CONTROLLER.preAssign(ObserverContext.createAndPrepare(CP_ENV, null), region); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preUnassign + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + HRegionInfo region = new HRegionInfo(TEST_TABLE.getTableName()); + ACCESS_CONTROLLER.preUnassign(ObserverContext.createAndPrepare(CP_ENV, null), region, + true); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preBalance + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preBalance(ObserverContext.createAndPrepare(CP_ENV, null)); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preBalanceSwitch + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preBalanceSwitch(ObserverContext.createAndPrepare(CP_ENV, null), + true); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preSnapshot + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + SnapshotDescription snapshot = SnapshotDescription.newBuilder() + .setName("foo") + .build(); + HTableDescriptor htd = new HTableDescriptor(TEST_TABLE.getTableName()); + ACCESS_CONTROLLER.preSnapshot(ObserverContext.createAndPrepare(CP_ENV, null), + snapshot, htd); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preListSnapshot + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + SnapshotDescription snapshot = SnapshotDescription.newBuilder() + .setName("foo") + .build(); + ACCESS_CONTROLLER.preListSnapshot(ObserverContext.createAndPrepare(CP_ENV, null), + snapshot); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preCloneSnapshot + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + SnapshotDescription snapshot = SnapshotDescription.newBuilder() + .setName("foo") + .build(); + HTableDescriptor htd = new HTableDescriptor(TEST_TABLE.getTableName()); + ACCESS_CONTROLLER.preCloneSnapshot(ObserverContext.createAndPrepare(CP_ENV, null), + snapshot, htd); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preRestoreSnapshot + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + SnapshotDescription snapshot = SnapshotDescription.newBuilder() + .setName("foo") + .build(); + HTableDescriptor htd = new HTableDescriptor(TEST_TABLE.getTableName()); + ACCESS_CONTROLLER.preRestoreSnapshot(ObserverContext.createAndPrepare(CP_ENV, null), + snapshot, htd); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preDeleteSnapshot + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + SnapshotDescription snapshot = SnapshotDescription.newBuilder() + .setName("foo") + .build(); + 
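+ // As with the snapshot hooks above, a minimal SnapshotDescription named "foo" is all the
+ // callback needs here; only the hook under test changes.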
ACCESS_CONTROLLER.preDeleteSnapshot(ObserverContext.createAndPrepare(CP_ENV, null), + snapshot); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preGetTableDescriptors + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + List tableNamesList = Lists.newArrayList(); + tableNamesList.add(TEST_TABLE.getTableName()); + List descriptors = Lists.newArrayList(); + ACCESS_CONTROLLER.preGetTableDescriptors(ObserverContext.createAndPrepare(CP_ENV, null), + tableNamesList, descriptors, ".+"); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preGetTableNames + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + List descriptors = Lists.newArrayList(); + ACCESS_CONTROLLER.preGetTableNames(ObserverContext.createAndPrepare(CP_ENV, null), + descriptors, ".+"); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preCreateNamespace + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + NamespaceDescriptor ns = NamespaceDescriptor.create("test").build(); + ACCESS_CONTROLLER.preCreateNamespace(ObserverContext.createAndPrepare(CP_ENV, null), + ns); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preDeleteNamespace + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preDeleteNamespace(ObserverContext.createAndPrepare(CP_ENV, null), + "test"); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preModifyNamespace + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + NamespaceDescriptor ns = NamespaceDescriptor.create("test").build(); + ACCESS_CONTROLLER.preModifyNamespace(ObserverContext.createAndPrepare(CP_ENV, null), + ns); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preGetNamespaceDescriptor + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preGetNamespaceDescriptor(ObserverContext.createAndPrepare(CP_ENV, + null), + "test"); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preListNamespaceDescriptors + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + List descriptors = Lists.newArrayList(); + ACCESS_CONTROLLER.preListNamespaceDescriptors(ObserverContext.createAndPrepare(CP_ENV, + null), + descriptors); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preSetUserQuota + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + Quotas quotas = Quotas.newBuilder().build(); + ACCESS_CONTROLLER.preSetUserQuota(ObserverContext.createAndPrepare(CP_ENV, null), + "testuser", quotas); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preSetTableQuota + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + Quotas quotas = Quotas.newBuilder().build(); + 
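+ // The quota hooks follow the same shape: an empty Quotas message plus the target
+ // (a user name above, the test table here, and a namespace in the next block).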
ACCESS_CONTROLLER.preSetTableQuota(ObserverContext.createAndPrepare(CP_ENV, null), + TEST_TABLE.getTableName(), quotas); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preSetNamespaceQuota + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + Quotas quotas = Quotas.newBuilder().build(); + ACCESS_CONTROLLER.preSetNamespaceQuota(ObserverContext.createAndPrepare(CP_ENV, null), + "test", quotas); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + } + + /** Test region server observer */ + @Test + public void testPassiveRegionServerOperations() throws Exception { + // preStopRegionServer + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preStopRegionServer(ObserverContext.createAndPrepare(RSCP_ENV, null)); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preMerge + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + HTableDescriptor htd = new HTableDescriptor(TEST_TABLE.getTableName()); + Region region_a = mock(Region.class); + when(region_a.getTableDesc()).thenReturn(htd); + Region region_b = mock(Region.class); + when(region_b.getTableDesc()).thenReturn(htd); + ACCESS_CONTROLLER.preMerge(ObserverContext.createAndPrepare(RSCP_ENV, null), region_a, + region_b); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preRollWALWriterRequest + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preRollWALWriterRequest(ObserverContext.createAndPrepare(RSCP_ENV, + null)); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + } + + /** Test region observer */ + @Test + public void testPassiveRegionOperations() throws Exception { + + // preOpen + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preOpen(ObserverContext.createAndPrepare(RCP_ENV, null)); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preFlush + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preFlush(ObserverContext.createAndPrepare(RCP_ENV, null)); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preSplit + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preSplit(ObserverContext.createAndPrepare(RCP_ENV, null)); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preGetClosestRowBefore + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preGetClosestRowBefore(ObserverContext.createAndPrepare(RCP_ENV, null), + TEST_ROW, TEST_FAMILY, new Result()); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preGetOp + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + List cells = Lists.newArrayList(); + 
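+ // preGetOp takes the (still empty) cell list as an out-parameter; with authorization
+ // disabled the hook is expected to return without throwing for every user listed below.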
ACCESS_CONTROLLER.preGetOp(ObserverContext.createAndPrepare(RCP_ENV, null), + new Get(TEST_ROW), cells); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preExists + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preExists(ObserverContext.createAndPrepare(RCP_ENV, null), + new Get(TEST_ROW), true); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // prePut + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.prePut(ObserverContext.createAndPrepare(RCP_ENV, null), + new Put(TEST_ROW), new WALEdit(), Durability.USE_DEFAULT); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preDelete + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preDelete(ObserverContext.createAndPrepare(RCP_ENV, null), + new Delete(TEST_ROW), new WALEdit(), Durability.USE_DEFAULT); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preBatchMutate + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preBatchMutate(ObserverContext.createAndPrepare(RCP_ENV, null), + new MiniBatchOperationInProgress(null, null, null, 0, 0)); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preCheckAndPut + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preCheckAndPut(ObserverContext.createAndPrepare(RCP_ENV, null), + TEST_ROW, TEST_FAMILY, TEST_Q1, CompareFilter.CompareOp.EQUAL, + new BinaryComparator("foo".getBytes()), new Put(TEST_ROW), true); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preCheckAndDelete + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preCheckAndDelete(ObserverContext.createAndPrepare(RCP_ENV, null), + TEST_ROW, TEST_FAMILY, TEST_Q1, CompareFilter.CompareOp.EQUAL, + new BinaryComparator("foo".getBytes()), new Delete(TEST_ROW), true); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preAppend + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preAppend(ObserverContext.createAndPrepare(RCP_ENV, null), + new Append(TEST_ROW)); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preIncrement + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preIncrement(ObserverContext.createAndPrepare(RCP_ENV, null), + new Increment(TEST_ROW)); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + // preScannerOpen + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preScannerOpen(ObserverContext.createAndPrepare(RCP_ENV, null), + new Scan(), mock(RegionScanner.class)); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, 
USER_QUAL, USER_NONE); + + // preBulkLoadHFile + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + List> paths = Lists.newArrayList(); + ACCESS_CONTROLLER.preBulkLoadHFile(ObserverContext.createAndPrepare(RCP_ENV, null), + paths); + return null; + } + }, SUPERUSER, USER_ADMIN, USER_RW, USER_RO, USER_OWNER, USER_CREATE, USER_QUAL, USER_NONE); + + } + + @Test + public void testPassiveCellPermissions() throws Exception { + final Configuration conf = TEST_UTIL.getConfiguration(); + + // store two sets of values, one store with a cell level ACL, and one without + verifyAllowed(new AccessTestAction() { + @Override + public Object run() throws Exception { + try(Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName())) { + Put p; + // with ro ACL + p = new Put(TEST_ROW).add(TEST_FAMILY, TEST_Q1, ZERO); + p.setACL(USER_NONE.getShortName(), new Permission(Action.READ)); + t.put(p); + // with rw ACL + p = new Put(TEST_ROW).add(TEST_FAMILY, TEST_Q2, ZERO); + p.setACL(USER_NONE.getShortName(), new Permission(Action.READ, Action.WRITE)); + t.put(p); + // no ACL + p = new Put(TEST_ROW) + .add(TEST_FAMILY, TEST_Q3, ZERO) + .add(TEST_FAMILY, TEST_Q4, ZERO); + t.put(p); + } + return null; + } + }, USER_OWNER); + + // check that a scan over the test data returns the expected number of KVs + + final List scanResults = Lists.newArrayList(); + + AccessTestAction scanAction = new AccessTestAction() { + @Override + public List run() throws Exception { + Scan scan = new Scan(); + scan.setStartRow(TEST_ROW); + scan.setStopRow(Bytes.add(TEST_ROW, new byte[]{ 0 } )); + scan.addFamily(TEST_FAMILY); + Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(TEST_TABLE.getTableName()); + try { + ResultScanner scanner = t.getScanner(scan); + Result result = null; + do { + result = scanner.next(); + if (result != null) { + scanResults.addAll(result.listCells()); + } + } while (result != null); + } finally { + t.close(); + connection.close(); + } + return scanResults; + } + }; + + // owner will see all values + scanResults.clear(); + verifyAllowed(scanAction, USER_OWNER); + assertEquals(4, scanResults.size()); + + // other user will also see 4 values + // if cell filtering was active, we would only see 2 values + scanResults.clear(); + verifyAllowed(scanAction, USER_NONE); + assertEquals(4, scanResults.size()); + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java new file mode 100644 index 00000000000..88db52e151f --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestGenerateDelegationToken.java @@ -0,0 +1,173 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.security.token; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.util.Properties; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.LocalHBaseCluster; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil; +import org.apache.hadoop.hbase.ipc.AsyncRpcClient; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; +import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.ipc.RpcClientFactory; +import org.apache.hadoop.hbase.ipc.RpcClientImpl; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; +import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.GetAuthenticationTokenRequest; +import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.WhoAmIRequest; +import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.WhoAmIResponse; +import org.apache.hadoop.hbase.security.AccessDeniedException; +import org.apache.hadoop.hbase.security.HBaseKerberosUtils; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.SecurityTests; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.http.HttpConfig; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import com.google.protobuf.ServiceException; + +@Category({ SecurityTests.class, MediumTests.class }) +public class TestGenerateDelegationToken { + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + private static LocalHBaseCluster CLUSTER; + + private static final File KEYTAB_FILE = new File(TEST_UTIL.getDataTestDir("keytab").toUri() + .getPath()); + private static MiniKdc KDC; + + private static String HOST = "localhost"; + + private static String USERNAME; + + private static String PRINCIPAL; + + private static String HTTP_PRINCIPAL; + + private static void setHdfsSecuredConfiguration(Configuration conf) throws Exception { + // change XXX_USER_NAME_KEY to XXX_KERBEROS_PRINCIPAL_KEY after we drop support for hadoop-2.4.1 + conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, PRINCIPAL + "@" + KDC.getRealm()); + conf.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, KEYTAB_FILE.getAbsolutePath()); + 
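+ // The DataNode reuses the same test principal and keytab as the NameNode; the
+ // DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY set below is the HTTP (SPNEGO) principal
+ // that setUp() creates alongside it in the shared keytab.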
conf.set(DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY, PRINCIPAL + "@" + KDC.getRealm()); + conf.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, KEYTAB_FILE.getAbsolutePath()); + conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, HTTP_PRINCIPAL + "@" + + KDC.getRealm()); + conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true); + conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name()); + conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0"); + conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0"); + + File keystoresDir = new File(TEST_UTIL.getDataTestDir("keystore").toUri().getPath()); + keystoresDir.mkdirs(); + String sslConfDir = KeyStoreTestUtil.getClasspathDir(TestGenerateDelegationToken.class); + KeyStoreTestUtil.setupSSLConfig(keystoresDir.getAbsolutePath(), sslConfDir, conf, false); + + conf.setBoolean("ignore.secure.ports.for.testing", true); + } + + @BeforeClass + public static void setUp() throws Exception { + Properties conf = MiniKdc.createConf(); + conf.put(MiniKdc.DEBUG, true); + KDC = new MiniKdc(conf, new File(TEST_UTIL.getDataTestDir("kdc").toUri().getPath())); + KDC.start(); + USERNAME = UserGroupInformation.getLoginUser().getShortUserName(); + PRINCIPAL = USERNAME + "/" + HOST; + HTTP_PRINCIPAL = "HTTP/" + HOST; + KDC.createPrincipal(KEYTAB_FILE, PRINCIPAL, HTTP_PRINCIPAL); + TEST_UTIL.startMiniZKCluster(); + + HBaseKerberosUtils.setKeytabFileForTesting(KEYTAB_FILE.getAbsolutePath()); + HBaseKerberosUtils.setPrincipalForTesting(PRINCIPAL + "@" + KDC.getRealm()); + HBaseKerberosUtils.setSecuredConfiguration(TEST_UTIL.getConfiguration()); + setHdfsSecuredConfiguration(TEST_UTIL.getConfiguration()); + UserGroupInformation.setConfiguration(TEST_UTIL.getConfiguration()); + TEST_UTIL.getConfiguration().setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, + TokenProvider.class.getName()); + TEST_UTIL.startMiniDFSCluster(1); + CLUSTER = new LocalHBaseCluster(TEST_UTIL.getConfiguration(), 1); + CLUSTER.startup(); + } + + @AfterClass + public static void tearDown() throws Exception { + if (CLUSTER != null) { + CLUSTER.shutdown(); + } + CLUSTER.join(); + if (KDC != null) { + KDC.stop(); + } + TEST_UTIL.shutdownMiniCluster(); + } + + private void testTokenAuth(Class rpcImplClass) throws IOException, + ServiceException { + TEST_UTIL.getConfiguration().set(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY, + rpcImplClass.getName()); + try (Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration()); + Table table = conn.getTable(TableName.META_TABLE_NAME)) { + CoprocessorRpcChannel rpcChannel = table.coprocessorService(HConstants.EMPTY_START_ROW); + AuthenticationProtos.AuthenticationService.BlockingInterface service = + AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel); + WhoAmIResponse response = service.whoAmI(null, WhoAmIRequest.getDefaultInstance()); + assertEquals(USERNAME, response.getUsername()); + assertEquals(AuthenticationMethod.TOKEN.name(), response.getAuthMethod()); + try { + service.getAuthenticationToken(null, GetAuthenticationTokenRequest.getDefaultInstance()); + } catch (ServiceException e) { + AccessDeniedException exc = (AccessDeniedException) ProtobufUtil.getRemoteException(e); + assertTrue(exc.getMessage().contains( + "Token generation only allowed for Kerberos authenticated clients")); + } + } + } + + @Test + public void test() throws Exception { + try (Connection conn = 
ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())) { + Token token = TokenUtil.obtainToken(conn); + UserGroupInformation.getCurrentUser().addToken(token); + testTokenAuth(RpcClientImpl.class); + testTokenAuth(AsyncRpcClient.class); + } + + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java index 3bd20b2b9e9..6bb2765217c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java @@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; import org.apache.hadoop.hbase.ipc.FifoRpcScheduler; -import org.apache.hadoop.hbase.ipc.RequestContext; import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcClientFactory; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -295,7 +294,7 @@ public class TestTokenAuthentication { public AuthenticationProtos.GetAuthenticationTokenResponse getAuthenticationToken( RpcController controller, AuthenticationProtos.GetAuthenticationTokenRequest request) throws ServiceException { - LOG.debug("Authentication token request from "+RequestContext.getRequestUserName()); + LOG.debug("Authentication token request from " + RpcServer.getRequestUserName()); // ignore passed in controller -- it's always null ServerRpcController serverController = new ServerRpcController(); BlockingRpcCallback callback = @@ -313,7 +312,7 @@ public class TestTokenAuthentication { public AuthenticationProtos.WhoAmIResponse whoAmI( RpcController controller, AuthenticationProtos.WhoAmIRequest request) throws ServiceException { - LOG.debug("whoAmI() request from "+RequestContext.getRequestUserName()); + LOG.debug("whoAmI() request from " + RpcServer.getRequestUserName()); // ignore passed in controller -- it's always null ServerRpcController serverController = new ServerRpcController(); BlockingRpcCallback callback = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java index 6f46fd37741..63fe4180f1e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java @@ -45,13 +45,12 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.OperationStatus; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.access.AccessControlLists; import 
org.apache.hadoop.hbase.security.visibility.expression.ExpressionNode; @@ -81,7 +80,7 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer private final ExpressionParser expressionParser = new ExpressionParser(); private final ExpressionExpander expressionExpander = new ExpressionExpander(); private Configuration conf; - private HRegion labelsRegion; + private Region labelsRegion; private List scanLabelGenerators; private List superUsers; private List superGroups; @@ -195,13 +194,16 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer List cells = null; if (labelsRegion == null) { Table table = null; + Connection connection = null; try { - table = new HTable(conf, VisibilityConstants.LABELS_TABLE_NAME); + connection = ConnectionFactory.createConnection(conf); + table = connection.getTable(VisibilityConstants.LABELS_TABLE_NAME); Result result = table.get(get); cells = result.listCells(); } finally { if (table != null) { table.close(); + connection.close(); } } } else { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestDefaultScanLabelGeneratorStack.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestDefaultScanLabelGeneratorStack.java index 061db74c735..2cd5ff9c44d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestDefaultScanLabelGeneratorStack.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestDefaultScanLabelGeneratorStack.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.security.visibility; import static org.apache.hadoop.hbase.security.visibility.VisibilityConstants.LABELS_TABLE_NAME; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertNull; import java.io.IOException; import java.security.PrivilegedExceptionAction; @@ -33,8 +32,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -106,9 +103,8 @@ public class TestDefaultScanLabelGeneratorStack { SUPERUSER.runAs(new PrivilegedExceptionAction() { public Void run() throws Exception { - Connection connection = ConnectionFactory.createConnection(conf); - Table table = TEST_UTIL.createTable(tableName, CF); - try { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = TEST_UTIL.createTable(tableName, CF)) { Put put = new Put(ROW_1); put.add(CF, Q1, HConstants.LATEST_TIMESTAMP, value1); put.setCellVisibility(new CellVisibility(SECRET)); @@ -121,9 +117,6 @@ public class TestDefaultScanLabelGeneratorStack { put.add(CF, Q3, HConstants.LATEST_TIMESTAMP, value3); table.put(put); return null; - } finally { - table.close(); - connection.close(); } } }); @@ -131,9 +124,8 @@ public class TestDefaultScanLabelGeneratorStack { // Test that super user can see all the cells. 
SUPERUSER.runAs(new PrivilegedExceptionAction() { public Void run() throws Exception { - Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName); - try { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(1); @@ -161,18 +153,14 @@ public class TestDefaultScanLabelGeneratorStack { assertTrue(Bytes.equals(current.getValue(), value3)); return null; - } finally { - table.close(); - connection.close(); } } }); TESTUSER.runAs(new PrivilegedExceptionAction() { public Void run() throws Exception { - Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName); - try { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { // Test scan with no auth attribute Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); @@ -239,9 +227,6 @@ public class TestDefaultScanLabelGeneratorStack { assertFalse(cellScanner2.advance()); return null; - } finally { - table.close(); - connection.close(); } } }); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestEnforcingScanLabelGenerator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestEnforcingScanLabelGenerator.java index df165bd1071..2fa8afd2395 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestEnforcingScanLabelGenerator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestEnforcingScanLabelGenerator.java @@ -101,8 +101,8 @@ public class TestEnforcingScanLabelGenerator { SUPERUSER.runAs(new PrivilegedExceptionAction() { public Void run() throws Exception { - Table table = TEST_UTIL.createTable(tableName, CF); - try { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = TEST_UTIL.createTable(tableName, CF)) { Put put = new Put(ROW_1); put.add(CF, Q1, HConstants.LATEST_TIMESTAMP, value); put.setCellVisibility(new CellVisibility(SECRET)); @@ -115,8 +115,6 @@ public class TestEnforcingScanLabelGenerator { put.add(CF, Q3, HConstants.LATEST_TIMESTAMP, value); table.put(put); return null; - } finally { - table.close(); } } }); @@ -124,9 +122,8 @@ public class TestEnforcingScanLabelGenerator { // Test that super user can see all the cells. SUPERUSER.runAs(new PrivilegedExceptionAction() { public Void run() throws Exception { - Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName); - try { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { // Test that super user can see all the cells. 
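[Editorial note] A recurring detail in these test hunks is that the Connection is opened inside the PrivilegedExceptionAction instead of reusing a shared one; a Connection carries the identity of the user that created it, so a per-user read has to build its own connection inside runAs. A sketch of that shape, not part of the patch, assuming a User such as the ones these tests create with User.createUserForTesting and a placeholder table name and label:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.visibility.Authorizations;

public class RunAsScanSketch {
  // Scans "example" as testUser; only cells visible under the requested label come back.
  static void scanAs(User testUser, final Configuration conf) throws Exception {
    testUser.runAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        Scan scan = new Scan();
        scan.setAuthorizations(new Authorizations("SECRET")); // placeholder label
        // The connection must be created here so it carries testUser's identity.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("example")); // placeholder table
             ResultScanner scanner = table.getScanner(scan)) {
          for (Result r : scanner) {
            System.out.println(r);
          }
          return null;
        }
      }
    });
  }
}

In the tests the User comes from User.createUserForTesting against the mini-cluster configuration.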
Get get = new Get(ROW_1); Result result = table.get(get); @@ -134,18 +131,14 @@ public class TestEnforcingScanLabelGenerator { assertTrue("Missing authorization", result.containsColumn(CF, Q2)); assertTrue("Missing authorization", result.containsColumn(CF, Q3)); return null; - } finally { - table.close(); - connection.close(); } } }); TESTUSER.runAs(new PrivilegedExceptionAction() { public Void run() throws Exception { - Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName); - try { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { // Test that we enforce the defined set Get get = new Get(ROW_1); get.setAuthorizations(new Authorizations(new String[] { SECRET, CONFIDENTIAL })); @@ -160,9 +153,6 @@ public class TestEnforcingScanLabelGenerator { assertTrue("Missing authorization", result.containsColumn(CF, Q2)); assertTrue("Inappropriate filtering", result.containsColumn(CF, Q3)); return null; - } finally { - table.close(); - connection.close(); } } }); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java index 01e22daab86..c1d4d01e8d9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java @@ -36,13 +36,11 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.replication.ReplicationAdmin; @@ -139,25 +137,15 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit // Wait for the labels table to become available TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000); TEST_UTIL1.startMiniCluster(1); - HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin(); HTableDescriptor table = new HTableDescriptor(TABLE_NAME); HColumnDescriptor desc = new HColumnDescriptor(fam); desc.setScope(HConstants.REPLICATION_SCOPE_GLOBAL); table.addFamily(desc); - try { + try (HBaseAdmin hBaseAdmin = TEST_UTIL.getHBaseAdmin()) { hBaseAdmin.createTable(table); - } finally { - if (hBaseAdmin != null) { - hBaseAdmin.close(); - } } - HBaseAdmin hBaseAdmin1 = TEST_UTIL1.getHBaseAdmin(); - try { + try (HBaseAdmin hBaseAdmin1 = TEST_UTIL1.getHBaseAdmin()){ hBaseAdmin1.createTable(table); - } finally { - if (hBaseAdmin1 != null) { - hBaseAdmin1.close(); - } } addLabels(); setAuths(conf); @@ -174,13 +162,10 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit final boolean nullExpected, final String... 
auths) throws IOException, InterruptedException { PrivilegedExceptionAction scanAction = new PrivilegedExceptionAction() { - Table table2 = null; public Void run() throws Exception { - Connection connection = null; - try { - connection = ConnectionFactory.createConnection(conf1); - table2 = connection.getTable(TABLE_NAME); + try (Connection connection = ConnectionFactory.createConnection(conf1); + Table table2 = connection.getTable(TABLE_NAME)) { CellScanner cellScanner; Cell current; Get get = new Get(row); @@ -206,13 +191,6 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit doAssert(row, visString); assertTrue(foundNonVisTag); return null; - } finally { - if (table2 != null) { - table2.close(); - } - if(connection != null){ - connection.close(); - } } } }; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java index dcd5fbb459d..3671386bf9f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java @@ -57,8 +57,8 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResul import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.GetAuthsResponse; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; @@ -114,9 +114,8 @@ public abstract class TestVisibilityLabels { @Test public void testSimpleVisibilityLabels() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "|" + CONFIDENTIAL, - PRIVATE + "|" + CONFIDENTIAL); - try { + try (Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "|" + CONFIDENTIAL, + PRIVATE + "|" + CONFIDENTIAL)) { Scan s = new Scan(); s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL, PRIVATE)); ResultScanner scanner = table.getScanner(s); @@ -133,21 +132,16 @@ public abstract class TestVisibilityLabels { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); - } finally { - if (table != null) { - table.close(); - } } } @Test public void testSimpleVisibilityLabelsWithUniCodeCharacters() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = createTableAndWriteDataWithLabels(tableName, - SECRET + "|" + CellVisibility.quote(COPYRIGHT), "(" + CellVisibility.quote(COPYRIGHT) + "&" - + CellVisibility.quote(ACCENT) + ")|" + CONFIDENTIAL, - CellVisibility.quote(UNICODE_VIS_TAG) + "&" + SECRET); - try { + try (Table table = createTableAndWriteDataWithLabels(tableName, + SECRET + "|" + CellVisibility.quote(COPYRIGHT), "(" + CellVisibility.quote(COPYRIGHT) + + "&" + CellVisibility.quote(ACCENT) + ")|" + CONFIDENTIAL, + CellVisibility.quote(UNICODE_VIS_TAG) + "&" + SECRET)) { Scan s = new Scan(); s.setAuthorizations(new 
Authorizations(SECRET, CONFIDENTIAL, PRIVATE, COPYRIGHT, ACCENT, UNICODE_VIS_TAG)); @@ -169,20 +163,15 @@ public abstract class TestVisibilityLabels { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row3, 0, row3.length)); - } finally { - if (table != null) { - table.close(); - } } } @Test public void testAuthorizationsWithSpecialUnicodeCharacters() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = createTableAndWriteDataWithLabels(tableName, + try (Table table = createTableAndWriteDataWithLabels(tableName, CellVisibility.quote(UC1) + "|" + CellVisibility.quote(UC2), CellVisibility.quote(UC1), - CellVisibility.quote(UNICODE_VIS_TAG)); - try { + CellVisibility.quote(UNICODE_VIS_TAG))) { Scan s = new Scan(); s.setAuthorizations(new Authorizations(UC1, UC2, ACCENT, UNICODE_VIS_TAG)); @@ -204,21 +193,16 @@ public abstract class TestVisibilityLabels { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row3, 0, row3.length)); - } finally { - if (table != null) { - table.close(); - } } } @Test public void testVisibilityLabelsWithComplexLabels() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = createTableAndWriteDataWithLabels(tableName, "(" + SECRET + "|" + CONFIDENTIAL - + ")" + "&" + "!" + TOPSECRET, "(" + PRIVATE + "&" + CONFIDENTIAL + "&" + SECRET + ")", "(" - + PRIVATE + "&" + CONFIDENTIAL + "&" + SECRET + ")", "(" + PRIVATE + "&" + CONFIDENTIAL - + "&" + SECRET + ")"); - try { + try (Table table = createTableAndWriteDataWithLabels(tableName, "(" + SECRET + "|" + + CONFIDENTIAL + ")" + "&" + "!" 
+ TOPSECRET, "(" + PRIVATE + "&" + CONFIDENTIAL + "&" + + SECRET + ")", "(" + PRIVATE + "&" + CONFIDENTIAL + "&" + SECRET + ")", "(" + PRIVATE + + "&" + CONFIDENTIAL + "&" + SECRET + ")")) { Scan s = new Scan(); s.setAuthorizations(new Authorizations(TOPSECRET, CONFIDENTIAL, PRIVATE, PUBLIC, SECRET)); ResultScanner scanner = table.getScanner(s); @@ -239,28 +223,19 @@ public abstract class TestVisibilityLabels { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row4, 0, row4.length)); - } finally { - if (table != null) { - table.close(); - } } } @Test public void testVisibilityLabelsThatDoesNotPassTheCriteria() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = createTableAndWriteDataWithLabels(tableName, "(" + SECRET + "|" + CONFIDENTIAL - + ")", PRIVATE); - try { + try (Table table = createTableAndWriteDataWithLabels(tableName, + "(" + SECRET + "|" + CONFIDENTIAL + ")", PRIVATE)){ Scan s = new Scan(); s.setAuthorizations(new Authorizations(PUBLIC)); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); assertTrue(next.length == 0); - } finally { - if (table != null) { - table.close(); - } } } @@ -277,27 +252,21 @@ public abstract class TestVisibilityLabels { @Test public void testVisibilityLabelsInScanThatDoesNotMatchAnyDefinedLabels() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = createTableAndWriteDataWithLabels(tableName, "(" + SECRET + "|" + CONFIDENTIAL - + ")", PRIVATE); - try { + try ( Table table = createTableAndWriteDataWithLabels(tableName, "(" + SECRET + "|" + + CONFIDENTIAL + ")", PRIVATE)){ Scan s = new Scan(); s.setAuthorizations(new Authorizations("SAMPLE")); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); assertTrue(next.length == 0); - } finally { - if (table != null) { - table.close(); - } } } @Test public void testVisibilityLabelsWithGet() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL + "&!" - + PRIVATE, SECRET + "&" + CONFIDENTIAL + "&" + PRIVATE); - try { + try (Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL + + "&!" 
+ PRIVATE, SECRET + "&" + CONFIDENTIAL + "&" + PRIVATE)) { Get get = new Get(row1); get.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); Result result = table.get(get); @@ -305,10 +274,6 @@ public abstract class TestVisibilityLabels { Cell cell = result.getColumnLatestCell(fam, qual); assertTrue(Bytes.equals(value, 0, value.length, cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); - } finally { - if (table != null) { - table.close(); - } } } @@ -330,7 +295,7 @@ public abstract class TestVisibilityLabels { List regionServerThreads = TEST_UTIL.getHBaseCluster() .getRegionServerThreads(); for (RegionServerThread rsThread : regionServerThreads) { - List onlineRegions = rsThread.getRegionServer().getOnlineRegions( + List onlineRegions = rsThread.getRegionServer().getOnlineRegions( LABELS_TABLE_NAME); if (onlineRegions.size() > 0) { rsThread.getRegionServer().abort("Aborting "); @@ -364,7 +329,7 @@ public abstract class TestVisibilityLabels { for (RegionServerThread rsThread : regionServerThreads) { while (true) { if (!rsThread.getRegionServer().isAborted()) { - List onlineRegions = rsThread.getRegionServer().getOnlineRegions( + List onlineRegions = rsThread.getRegionServer().getOnlineRegions( LABELS_TABLE_NAME); if (onlineRegions.size() > 0) { break; @@ -378,26 +343,18 @@ public abstract class TestVisibilityLabels { } TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000); t.join(); - Table table = null; - try { - table = TEST_UTIL.getConnection().getTable(tableName); + try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { Scan s = new Scan(); s.setAuthorizations(new Authorizations(SECRET)); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); assertTrue(next.length == 1); - } finally { - if (table != null) { - table.close(); - } } } @Test(timeout = 60 * 1000) public void testVisibilityLabelsOnRSRestart() throws Exception { final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = createTableAndWriteDataWithLabels(tableName, "(" + SECRET + "|" + CONFIDENTIAL - + ")", PRIVATE); List regionServerThreads = TEST_UTIL.getHBaseCluster() .getRegionServerThreads(); for (RegionServerThread rsThread : regionServerThreads) { @@ -406,16 +363,13 @@ public abstract class TestVisibilityLabels { // Start one new RS RegionServerThread rs = TEST_UTIL.getHBaseCluster().startRegionServer(); waitForLabelsRegionAvailability(rs.getRegionServer()); - try { + try (Table table = createTableAndWriteDataWithLabels(tableName, "(" + SECRET + "|" + CONFIDENTIAL + + ")", PRIVATE);) { Scan s = new Scan(); s.setAuthorizations(new Authorizations(SECRET)); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(3); assertTrue(next.length == 1); - } finally { - if (table != null) { - table.close(); - } } } @@ -432,7 +386,7 @@ public abstract class TestVisibilityLabels { } catch (InterruptedException e) { } } - HRegion labelsTableRegion = regionServer.getOnlineRegions(LABELS_TABLE_NAME).get(0); + Region labelsTableRegion = regionServer.getOnlineRegions(LABELS_TABLE_NAME).get(0); while (labelsTableRegion.isRecovering()) { try { Thread.sleep(10); @@ -444,17 +398,12 @@ public abstract class TestVisibilityLabels { @Test public void testVisibilityLabelsInGetThatDoesNotMatchAnyDefinedLabels() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = createTableAndWriteDataWithLabels(tableName, "(" + SECRET + "|" + CONFIDENTIAL - + ")", PRIVATE); - try { + try (Table 
table = createTableAndWriteDataWithLabels(tableName, "(" + SECRET + "|" + CONFIDENTIAL + + ")", PRIVATE)) { Get get = new Get(row1); get.setAuthorizations(new Authorizations("SAMPLE")); Result result = table.get(get); assertTrue(result.isEmpty()); - } finally { - if (table != null) { - table.close(); - } } } @@ -472,9 +421,7 @@ public abstract class TestVisibilityLabels { } }; SUPERUSER.runAs(action); - Table ht = null; - try { - ht = TEST_UTIL.getConnection().getTable(LABELS_TABLE_NAME); + try (Table ht = TEST_UTIL.getConnection().getTable(LABELS_TABLE_NAME);) { Scan scan = new Scan(); scan.setAuthorizations(new Authorizations(VisibilityUtils.SYSTEM_LABEL)); ResultScanner scanner = ht.getScanner(scan); @@ -487,10 +434,6 @@ public abstract class TestVisibilityLabels { assertTrue(auths.contains(SECRET)); assertTrue(auths.contains(CONFIDENTIAL)); assertEquals(2, auths.size()); - } finally { - if (ht != null) { - ht.close(); - } } action = new PrivilegedExceptionAction() { @@ -583,11 +526,8 @@ public abstract class TestVisibilityLabels { "org.apache.hadoop.hbase.security.visibility.InvalidLabelException: " + "Label 'public' is not set for the user testUser")); assertTrue(resultList.get(2).getException().getValue().isEmpty()); - Connection connection = null; - Table ht = null; - try { - connection = ConnectionFactory.createConnection(conf); - ht = connection.getTable(LABELS_TABLE_NAME); + try (Connection connection = ConnectionFactory.createConnection(conf); + Table ht = connection.getTable(LABELS_TABLE_NAME)) { ResultScanner scanner = ht.getScanner(new Scan()); Result result = null; List results = new ArrayList(); @@ -597,13 +537,6 @@ public abstract class TestVisibilityLabels { List curAuths = extractAuths(user, results); assertTrue(curAuths.contains(PRIVATE)); assertEquals(1, curAuths.size()); - } finally { - if (ht != null) { - ht.close(); - } - if (connection != null){ - connection.close(); - } } GetAuthsResponse authsResponse = null; @@ -627,9 +560,7 @@ public abstract class TestVisibilityLabels { @Test public void testLabelsWithCheckAndPut() throws Throwable { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = null; - try { - table = TEST_UTIL.createTable(tableName, fam); + try (Table table = TEST_UTIL.createTable(tableName, fam)) { byte[] row1 = Bytes.toBytes("row1"); Put put = new Put(row1); put.add(fam, qual, HConstants.LATEST_TIMESTAMP, value); @@ -649,19 +580,13 @@ public abstract class TestVisibilityLabels { assertTrue(Bytes.equals(row2, result.getRow())); result = scanner.next(); assertNull(result); - } finally { - if (table != null) { - table.close(); - } } } @Test public void testLabelsWithIncrement() throws Throwable { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = null; - try { - table = TEST_UTIL.createTable(tableName, fam); + try (Table table = TEST_UTIL.createTable(tableName, fam)) { byte[] row1 = Bytes.toBytes("row1"); byte[] val = Bytes.toBytes(1L); Put put = new Put(row1); @@ -681,19 +606,13 @@ public abstract class TestVisibilityLabels { table.increment(increment); result = table.get(get); assertTrue(!result.isEmpty()); - } finally { - if (table != null) { - table.close(); - } } } @Test public void testLabelsWithAppend() throws Throwable { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = null; - try { - table = TEST_UTIL.createTable(tableName, fam); + try (Table table = TEST_UTIL.createTable(tableName, fam);) { byte[] row1 = Bytes.toBytes("row1"); byte[] val = 
Bytes.toBytes("a"); Put put = new Put(row1); @@ -715,10 +634,6 @@ public abstract class TestVisibilityLabels { table.append(append); result = table.get(get); assertTrue(!result.isEmpty()); - } finally { - if (table != null) { - table.close(); - } } } @@ -779,9 +694,7 @@ public abstract class TestVisibilityLabels { col.setMaxVersions(5); desc.addFamily(col); TEST_UTIL.getHBaseAdmin().createTable(desc); - Table table = null; - try { - table = TEST_UTIL.getConnection().getTable(tableName); + try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { Put put = new Put(r1); put.add(fam, qual, 3l, v1); put.add(fam, qual2, 3l, v1); @@ -854,10 +767,6 @@ public abstract class TestVisibilityLabels { assertNotNull(cell); assertTrue(Bytes.equals(v2, 0, v2.length, cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); - } finally { - if (table != null) { - table.close(); - } } } @@ -869,8 +778,7 @@ public abstract class TestVisibilityLabels { HColumnDescriptor col = new HColumnDescriptor(fam); desc.addFamily(col); TEST_UTIL.getHBaseAdmin().createTable(desc); - Table table = TEST_UTIL.getConnection().getTable(tableName); - try { + try (Table table = TEST_UTIL.getConnection().getTable(tableName)){ Put p1 = new Put(row1); p1.add(fam, qual, value); p1.setCellVisibility(new CellVisibility(CONFIDENTIAL)); @@ -895,8 +803,6 @@ public abstract class TestVisibilityLabels { result = table.get(get); assertFalse(result.containsColumn(fam, qual)); assertTrue(result.containsColumn(fam, qual2)); - } finally { - table.close(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java index c087f4e6522..bc34ce3e3c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java @@ -219,12 +219,11 @@ public class TestVisibilityLabelsReplication { @Test public void testVisibilityReplication() throws Exception { - Table table = writeData(TABLE_NAME, "(" + SECRET + "&" + PUBLIC + ")" + "|(" + CONFIDENTIAL - + ")&(" + TOPSECRET + ")", "(" + PRIVATE + "|" + CONFIDENTIAL + ")&(" + PUBLIC + "|" - + TOPSECRET + ")", "(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" + TOPSECRET, - CellVisibility.quote(UNICODE_VIS_TAG) + "&" + SECRET); int retry = 0; - try { + try (Table table = writeData(TABLE_NAME, "(" + SECRET + "&" + PUBLIC + ")" + "|(" + CONFIDENTIAL + + ")&(" + TOPSECRET + ")", "(" + PRIVATE + "|" + CONFIDENTIAL + ")&(" + PUBLIC + "|" + + TOPSECRET + ")", "(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" 
+ TOPSECRET, + CellVisibility.quote(UNICODE_VIS_TAG) + "&" + SECRET);) { Scan s = new Scan(); s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL, PRIVATE, TOPSECRET, UNICODE_VIS_TAG)); @@ -252,9 +251,7 @@ public class TestVisibilityLabelsReplication { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row4, 0, row4.length)); - Table table2 = null; - try { - table2 = TEST_UTIL1.getConnection().getTable(TABLE_NAME); + try (Table table2 = TEST_UTIL1.getConnection().getTable(TABLE_NAME);) { s = new Scan(); // Ensure both rows are replicated scanner = table2.getScanner(s); @@ -273,14 +270,6 @@ public class TestVisibilityLabelsReplication { verifyGet(row3, expectedVisString[2], expected[2], false, PRIVATE, SECRET); verifyGet(row3, "", expected[3], true, TOPSECRET, SECRET); verifyGet(row4, expectedVisString[3], expected[4], false, UNICODE_VIS_TAG, SECRET); - } finally { - if (table2 != null) { - table2.close(); - } - } - } finally { - if (table != null) { - table.close(); } } } @@ -314,13 +303,9 @@ public class TestVisibilityLabelsReplication { final boolean nullExpected, final String... auths) throws IOException, InterruptedException { PrivilegedExceptionAction scanAction = new PrivilegedExceptionAction() { - Table table2 = null; - Connection connection = null; - public Void run() throws Exception { - try { - connection = ConnectionFactory.createConnection(conf1); - table2 = connection.getTable(TABLE_NAME); + try (Connection connection = ConnectionFactory.createConnection(conf1); + Table table2 = connection.getTable(TABLE_NAME)) { CellScanner cellScanner; Cell current; Get get = new Get(row); @@ -354,13 +339,6 @@ public class TestVisibilityLabelsReplication { doAssert(row, visString); assertTrue(foundNonVisTag); return null; - } finally { - if (table2 != null) { - table2.close(); - } - if(connection != null) { - connection.close(); - } } } }; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java index 7e7d8a388d1..3175fccb525 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; @@ -118,7 +117,7 @@ public class TestVisibilityLabelsWithACL { String user = "user2"; VisibilityClient.setAuths(conf, auths, user); TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - final HTable table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL + final Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL + "&!" + PRIVATE, SECRET + "&!" 
+ PRIVATE); SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER2.getShortName(), tableName, null, null, Permission.Action.READ); @@ -126,18 +125,14 @@ public class TestVisibilityLabelsWithACL { public Void run() throws Exception { Scan s = new Scan(); s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); - Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(table.getName()); - try { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(table.getName())) { ResultScanner scanner = t.getScanner(s); Result result = scanner.next(); assertTrue(!result.isEmpty()); assertTrue(Bytes.equals(Bytes.toBytes("row2"), result.getRow())); result = scanner.next(); assertNull(result); - } finally { - t.close(); - connection.close(); } return null; } @@ -151,19 +146,17 @@ public class TestVisibilityLabelsWithACL { String user = "admin"; VisibilityClient.setAuths(conf, auths, user); TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - final HTable table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL + final Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL + "&!" + PRIVATE, SECRET + "&!" + PRIVATE); PrivilegedExceptionAction scanAction = new PrivilegedExceptionAction() { public Void run() throws Exception { Scan s = new Scan(); s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); - Table t = TEST_UTIL.getConnection().getTable(table.getName()); - try { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(table.getName())) { ResultScanner scanner = t.getScanner(s); Result[] result = scanner.next(5); assertTrue(result.length == 2); - } finally { - t.close(); } return null; } @@ -177,18 +170,16 @@ public class TestVisibilityLabelsWithACL { String user = "admin"; VisibilityClient.setAuths(conf, auths, user); TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - final HTable table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL + final Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL + "&!" + PRIVATE, SECRET + "&!" + PRIVATE); PrivilegedExceptionAction scanAction = new PrivilegedExceptionAction() { public Void run() throws Exception { Get g = new Get(row1); g.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); - Table t = TEST_UTIL.getConnection().getTable(table.getName()); - try { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(table.getName())) { Result result = t.get(g); assertTrue(!result.isEmpty()); - } finally { - t.close(); } return null; } @@ -203,7 +194,7 @@ public class TestVisibilityLabelsWithACL { VisibilityClient.clearAuths(conf, auths, user); // Removing all auths if any. 
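[Editorial note] These TestVisibilityLabelsWithACL hunks exercise the interaction of the two layers: the ACL grant decides whether the read is allowed at all, while the Authorizations attached to the Get or Scan decide which cells come back. A small sketch of the read side, not part of the patch; the table, row, and labels are placeholders, and the labels are only honored if they have also been set for the calling user (for example via VisibilityClient.setAuths):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.util.Bytes;

public class LabeledGetSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Get get = new Get(Bytes.toBytes("row1"));
    // Ask for cells visible under SECRET or CONFIDENTIAL; cells whose
    // expressions are not satisfied are simply absent from the Result.
    get.setAuthorizations(new Authorizations("SECRET", "CONFIDENTIAL"));
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("example"))) { // placeholder table
      Result result = table.get(get);
      System.out.println("cells returned: " + result.size());
    }
  }
}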
VisibilityClient.setAuths(conf, auths, "user1"); TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - final HTable table = createTableAndWriteDataWithLabels(tableName, SECRET); + final Table table = createTableAndWriteDataWithLabels(tableName, SECRET); SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER1.getShortName(), tableName, null, null, Permission.Action.READ); SecureTestUtil.grantOnTable(TEST_UTIL, NORMAL_USER2.getShortName(), tableName, @@ -212,14 +203,10 @@ public class TestVisibilityLabelsWithACL { public Void run() throws Exception { Get g = new Get(row1); g.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); - Connection connection = ConnectionFactory.createConnection(conf); - Table t = connection.getTable(table.getName()); - try { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table t = connection.getTable(table.getName())) { Result result = t.get(g); assertTrue(result.isEmpty()); - } finally { - t.close(); - connection.close(); } return null; } @@ -315,9 +302,9 @@ public class TestVisibilityLabelsWithACL { assertTrue(authsList.contains(PRIVATE)); } - private static HTable createTableAndWriteDataWithLabels(TableName tableName, String... labelExps) + private static Table createTableAndWriteDataWithLabels(TableName tableName, String... labelExps) throws Exception { - HTable table = null; + Table table = null; try { table = TEST_UTIL.createTable(tableName, fam); int i = 1; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java index 52f86c3911c..3297002ff7e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java @@ -32,7 +32,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; @@ -150,10 +149,10 @@ public class TestVisibilityLabelsWithDefaultVisLabelService extends TestVisibili // Scan the visibility label Scan s = new Scan(); s.setAuthorizations(new Authorizations(VisibilityUtils.SYSTEM_LABEL)); - Table ht = TEST_UTIL.getConnection().getTable(LABELS_TABLE_NAME); + int i = 0; - try { - ResultScanner scanner = ht.getScanner(s); + try (Table ht = TEST_UTIL.getConnection().getTable(LABELS_TABLE_NAME); + ResultScanner scanner = ht.getScanner(s)) { while (true) { Result next = scanner.next(); if (next == null) { @@ -161,10 +160,6 @@ public class TestVisibilityLabelsWithDefaultVisLabelService extends TestVisibili } i++; } - } finally { - if (ht != null) { - ht.close(); - } } // One label is the "system" label. 
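[Editorial note] The createTableAndWriteDataWithLabels helper used throughout these tests writes each row with a visibility expression attached to the Put. A minimal sketch of that write path, not part of the patch; the table, family, qualifier, and expression are placeholders, the labels are assumed to have been defined already with VisibilityClient.addLabels, and addColumn stands in for the older Put.add overloads seen in the context lines:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;
import org.apache.hadoop.hbase.util.Bytes;

public class LabeledPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("example"))) { // placeholder table
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Only readers whose authorizations satisfy this boolean expression see the cell.
      put.setCellVisibility(new CellVisibility("(SECRET|CONFIDENTIAL)&!PRIVATE"));
      table.put(put);
    }
  }
}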
Assert.assertEquals("The count should be 13", 13, i); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java index b2d0ae5f740..033299b9d62 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java @@ -26,6 +26,8 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -112,13 +114,14 @@ public class TestVisibilityLabelsWithDeletes { public void testVisibilityLabelsWithDeleteColumns() throws Throwable { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - final Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + TOPSECRET, - SECRET); - try { + + try (Table table = createTableAndWriteDataWithLabels(tableName, + SECRET + "&" + TOPSECRET, SECRET)) { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(TOPSECRET + "&" + SECRET)); d.addColumns(fam, qual); @@ -143,10 +146,6 @@ public class TestVisibilityLabelsWithDeletes { assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); - } finally { - if (table != null) { - table.close(); - } } } @@ -154,13 +153,13 @@ public class TestVisibilityLabelsWithDeletes { public void testVisibilityLabelsWithDeleteFamily() throws Exception { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - final Table table = createTableAndWriteDataWithLabels(tableName, SECRET, CONFIDENTIAL + "|" - + TOPSECRET); - try { + try (Table table = createTableAndWriteDataWithLabels(tableName, SECRET, + CONFIDENTIAL + "|" + TOPSECRET);) { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row2); d.setCellVisibility(new CellVisibility(TOPSECRET + "|" + CONFIDENTIAL)); d.addFamily(fam); @@ -184,10 +183,6 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); - } finally { - if (table != null) { - table.close(); - } } } @@ -196,13 +191,13 @@ public class TestVisibilityLabelsWithDeletes { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); long[] ts = new long[] { 123l, 125l }; - final Table table = createTableAndWriteDataWithLabels(tableName, ts, 
CONFIDENTIAL + "|" - + TOPSECRET, SECRET); - try { + try (Table table = createTableAndWriteDataWithLabels(tableName, ts, + CONFIDENTIAL + "|" + TOPSECRET, SECRET)) { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(TOPSECRET + "|" + CONFIDENTIAL)); d.deleteFamilyVersion(fam, 123l); @@ -226,10 +221,6 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); - } finally { - if (table != null) { - table.close(); - } } } @@ -238,13 +229,13 @@ public class TestVisibilityLabelsWithDeletes { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); long[] ts = new long[] { 123l, 125l }; - final Table table = createTableAndWriteDataWithLabels(tableName, ts, CONFIDENTIAL + "|" - + TOPSECRET, SECRET); - try { + try (Table table = createTableAndWriteDataWithLabels(tableName, ts, + CONFIDENTIAL + "|" + TOPSECRET, SECRET);) { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(TOPSECRET + "|" + CONFIDENTIAL)); d.addColumn(fam, qual, 123l); @@ -268,10 +259,6 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); - } finally { - if (table != null) { - table.close(); - } } } @@ -284,7 +271,8 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET+")")); @@ -339,7 +327,8 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d1 = new Delete(row1); d1.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d1.addColumns(fam, qual); @@ -388,7 +377,8 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addColumns(fam, 
qual); @@ -440,7 +430,8 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d1 = new Delete(row1); d1.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d1.addFamily(fam); @@ -499,7 +490,8 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addFamily(fam); @@ -524,7 +516,8 @@ public class TestVisibilityLabelsWithDeletes { actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET)); d.addFamily(fam); @@ -573,7 +566,8 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addColumns(fam, qual); @@ -598,7 +592,8 @@ public class TestVisibilityLabelsWithDeletes { actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET)); d.addColumns(fam, qual); @@ -646,7 +641,8 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET)); d.addColumns(fam, qual, 126l); @@ -655,7 +651,8 @@ public class TestVisibilityLabelsWithDeletes { throw new IOException(t); } - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addColumn(fam, qual, 123l); @@ -707,14 +704,16 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = 
ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addColumn(fam, qual, 123l); table.delete(d); } - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET)); d.addColumn(fam, qual, 123l); @@ -759,7 +758,8 @@ public class TestVisibilityLabelsWithDeletes { + SECRET + "&" + TOPSECRET + ")")); d3.addFamily(fam); - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { table.delete(createList(d1, d2, d3)); } catch (Throwable t) { throw new IOException(t); @@ -803,7 +803,8 @@ public class TestVisibilityLabelsWithDeletes { Delete d2 = new Delete(row1); d2.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d2.addColumns(fam, qual); - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { table.delete(createList(d1, d2)); } catch (Throwable t) { throw new IOException(t); @@ -992,7 +993,8 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET+")")); @@ -1056,7 +1058,8 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumn(fam, qual); @@ -1109,9 +1112,7 @@ public class TestVisibilityLabelsWithDeletes { public void testDeleteColumnWithLatestTimeStampWhenNoVersionMatches() throws Exception { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = null; - try { - table = doPuts(tableName); + try (Table table = doPuts(tableName)) { TEST_UTIL.getHBaseAdmin().flush(tableName); Put put = new Put(Bytes.toBytes("row1")); put.add(fam, qual, 128l, value); @@ -1120,7 +1121,8 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET )); d.addColumn(fam, qual); @@ -1190,10 +1192,6 @@ public class TestVisibilityLabelsWithDeletes { assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row1, 0, row1.length)); 
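[Editorial note] The delete tests in this file all rest on the same rule: a Delete carries its own CellVisibility, and the marker only masks cells whose visibility expression matches it. A sketch of a labeled, timestamped delete, not part of the patch, with placeholder names; addColumn with a timestamp targets that one version, whereas addColumns would cover that version and everything older:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;
import org.apache.hadoop.hbase.util.Bytes;

public class LabeledDeleteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("example"))) { // placeholder table
      Delete d = new Delete(Bytes.toBytes("row1"));
      // The marker only hides cells whose visibility matches this expression.
      d.setCellVisibility(new CellVisibility("SECRET&TOPSECRET"));
      // Deletes exactly the version at timestamp 123; addColumns(fam, qual, 123L)
      // would delete version 123 and all older versions of the column.
      d.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), 123L);
      table.delete(d);
    }
  }
}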
assertEquals(current.getTimestamp(), 129l); - } finally { - if (table != null) { - table.close(); - } } } @Test @@ -1201,14 +1199,13 @@ public class TestVisibilityLabelsWithDeletes { throws Exception { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = null; - try { - table = doPuts(tableName); + try (Table table = doPuts(tableName)) { TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumn(fam, qual); @@ -1261,10 +1258,6 @@ public class TestVisibilityLabelsWithDeletes { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); - } finally { - if (table != null) { - table.close(); - } } } @@ -1272,14 +1265,13 @@ public class TestVisibilityLabelsWithDeletes { public void testDeleteFamilyLatestTimeStampWithMulipleVersions() throws Exception { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = null; - try { - table = doPuts(tableName); + try (Table table = doPuts(tableName)) { TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addFamily(fam); @@ -1315,10 +1307,6 @@ public class TestVisibilityLabelsWithDeletes { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); - } finally { - if (table != null) { - table.close(); - } } } @@ -1326,9 +1314,7 @@ public class TestVisibilityLabelsWithDeletes { public void testDeleteColumnswithMultipleColumnsWithMultipleVersions() throws Exception { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = null; - try { - table = doPutsWithDiffCols(tableName); + try (Table table = doPutsWithDiffCols(tableName)) { TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override @@ -1336,7 +1322,8 @@ public class TestVisibilityLabelsWithDeletes { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumns(fam, qual, 125l); - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { table.delete(d); } catch (Throwable t) { throw new IOException(t); @@ -1378,10 +1365,6 @@ public class TestVisibilityLabelsWithDeletes { assertEquals(current.getTimestamp(), 127l); assertTrue(Bytes.equals(current.getQualifierArray(), current.getQualifierOffset(), current.getQualifierLength(), qual2, 0, qual2.length)); - } finally { - if (table != null) { - table.close(); - } } } @@ -1415,7 +1398,8 @@ public class 
TestVisibilityLabelsWithDeletes { d2.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d2.addColumns(fam, qual1, 125l); - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { table.delete(createList(d1, d2)); } catch (Throwable t) { throw new IOException(t); @@ -1462,7 +1446,8 @@ public class TestVisibilityLabelsWithDeletes { d2.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d2.addColumns(fam, qual1, 126l); - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { table.delete(createList(d1, d2)); } catch (Throwable t) { throw new IOException(t); @@ -1483,14 +1468,13 @@ public class TestVisibilityLabelsWithDeletes { public void testDeleteFamilyWithoutCellVisibilityWithMulipleVersions() throws Exception { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = null; - try { - table = doPutsWithoutVisibility(tableName); + try (Table table = doPutsWithoutVisibility(tableName)) { TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.addFamily(fam); table.delete(d); @@ -1515,10 +1499,6 @@ public class TestVisibilityLabelsWithDeletes { Cell current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); - } finally { - if (table != null) { - table.close(); - } } } @@ -1527,13 +1507,12 @@ public class TestVisibilityLabelsWithDeletes { throws Exception { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = null; - try { - table = doPutsWithoutVisibility(tableName); + try (Table table = doPutsWithoutVisibility(tableName)) { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addFamily(fam); @@ -1583,10 +1562,6 @@ public class TestVisibilityLabelsWithDeletes { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); - } finally { - if (table != null) { - table.close(); - } } } @@ -1594,14 +1569,13 @@ public class TestVisibilityLabelsWithDeletes { public void testDeleteFamilySpecificTimeStampWithMulipleVersions() throws Exception { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = null; - try { - table = doPuts(tableName); + try (Table table = doPuts(tableName)) { TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = 
ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET + ")")); @@ -1643,10 +1617,6 @@ public class TestVisibilityLabelsWithDeletes { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); - } finally { - if (table != null) { - table.close(); - } } } @@ -1654,14 +1624,13 @@ public class TestVisibilityLabelsWithDeletes { public void testScanAfterCompaction() throws Exception { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = null; - try { - table = doPuts(tableName); + try (Table table = doPuts(tableName)) { TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + SECRET + "&" + TOPSECRET+")")); @@ -1701,10 +1670,6 @@ public class TestVisibilityLabelsWithDeletes { current = cellScanner.current(); assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); - } finally { - if (table != null) { - table.close(); - } } } @@ -1712,14 +1677,13 @@ public class TestVisibilityLabelsWithDeletes { public void testDeleteFamilySpecificTimeStampWithMulipleVersionsDoneTwice() throws Exception { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = null; - try { - // Do not flush here. - table = doPuts(tableName); + // Do not flush here. 
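[Editorial note] Several of these tests flush between the puts and the deletes, or deliberately skip flushing as in the "Do not flush here" hunks, so that both in-memory and on-disk cells are exercised. In the tests this goes through TEST_UTIL.getHBaseAdmin(); outside a mini-cluster the equivalent would be an Admin obtained from the Connection, sketched below with a placeholder table name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Push the MemStore contents of every region of the table out to HFiles,
      // so a following delete or scan also covers the on-disk read path.
      admin.flush(TableName.valueOf("example")); // placeholder table
    }
  }
}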
+ try (Table table = doPuts(tableName)) { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + TOPSECRET + "&" + SECRET+")")); @@ -1770,7 +1734,8 @@ public class TestVisibilityLabelsWithDeletes { actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET+")")); @@ -1806,10 +1771,6 @@ public class TestVisibilityLabelsWithDeletes { assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); assertEquals(current.getTimestamp(), 127l); - } finally { - if (table != null) { - table.close(); - } } } @@ -1833,7 +1794,8 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.deleteFamilyVersion(fam, 123l); @@ -1884,7 +1846,8 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET + ")")); @@ -1929,7 +1892,8 @@ public class TestVisibilityLabelsWithDeletes { actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addFamily(fam); @@ -1981,7 +1945,8 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET + ")")); @@ -2027,7 +1992,8 @@ public class TestVisibilityLabelsWithDeletes { actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = 
ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(CONFIDENTIAL)); d.addFamily(fam); @@ -2067,7 +2033,8 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumn(fam, qual, 125l); @@ -2117,7 +2084,8 @@ public class TestVisibilityLabelsWithDeletes { actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET+")")); @@ -2165,14 +2133,13 @@ public class TestVisibilityLabelsWithDeletes { public void testDeleteColumnSpecificTimeStampWithMulipleVersionsDoneTwice1() throws Exception { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = null; - try { - // Do not flush here. - table = doPuts(tableName); + // Do not flush here. + try (Table table = doPuts(tableName)) { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")" + "|(" + TOPSECRET + "&" + SECRET + ")")); @@ -2223,7 +2190,8 @@ public class TestVisibilityLabelsWithDeletes { actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumn(fam, qual, 127l); @@ -2268,24 +2236,20 @@ public class TestVisibilityLabelsWithDeletes { assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); assertEquals(current.getTimestamp(), 127l); - } finally { - if (table != null) { - table.close(); - } } } @Test public void testDeleteColumnSpecificTimeStampWithMulipleVersionsDoneTwice2() throws Exception { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = null; - try { - // Do not flush here. - table = doPuts(tableName); + + // Do not flush here. 
+ try (Table table = doPuts(tableName)) { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + TOPSECRET + "&" + SECRET+")")); @@ -2341,7 +2305,8 @@ public class TestVisibilityLabelsWithDeletes { actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET+")")); @@ -2387,10 +2352,6 @@ public class TestVisibilityLabelsWithDeletes { assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); assertEquals(current.getTimestamp(), 127l); - } finally { - if (table != null) { - table.close(); - } } } @Test @@ -2398,14 +2359,13 @@ public class TestVisibilityLabelsWithDeletes { throws Exception { setAuths(); final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = null; - try { - // Do not flush here. - table = doPuts(tableName); + // Do not flush here. + try (Table table = doPuts(tableName)) { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility(SECRET + "&" + TOPSECRET)); d.addColumn(fam, qual, 125l); @@ -2455,7 +2415,8 @@ public class TestVisibilityLabelsWithDeletes { actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET+")")); @@ -2491,10 +2452,6 @@ public class TestVisibilityLabelsWithDeletes { assertTrue(Bytes.equals(current.getRowArray(), current.getRowOffset(), current.getRowLength(), row2, 0, row2.length)); assertEquals(current.getTimestamp(), 127l); - } finally { - if (table != null) { - table.close(); - } } } @@ -2523,7 +2480,8 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + PRIVATE + "&" + CONFIDENTIAL + ")|(" + TOPSECRET + "&" + SECRET+")")); @@ -2574,7 +2532,8 @@ public class TestVisibilityLabelsWithDeletes { actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = 
TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.setCellVisibility(new CellVisibility("(" + CONFIDENTIAL + "&" + PRIVATE + ")|(" + TOPSECRET + "&" + SECRET+")")); @@ -2626,7 +2585,8 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.addColumn(fam, qual, 125l); table.delete(d); @@ -2649,7 +2609,8 @@ public class TestVisibilityLabelsWithDeletes { actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.addColumns(fam, qual, 125l); table.delete(d); @@ -2673,7 +2634,8 @@ public class TestVisibilityLabelsWithDeletes { actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.addFamily(fam, 125l); table.delete(d); @@ -2697,7 +2659,8 @@ public class TestVisibilityLabelsWithDeletes { actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.addFamily(fam); table.delete(d); @@ -2721,7 +2684,8 @@ public class TestVisibilityLabelsWithDeletes { actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.addColumns(fam, qual); table.delete(d); @@ -2745,7 +2709,8 @@ public class TestVisibilityLabelsWithDeletes { actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.deleteFamilyVersion(fam, 126l); table.delete(d); @@ -2825,7 +2790,8 @@ public class TestVisibilityLabelsWithDeletes { PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Delete d = new Delete(row1); d.addColumn(fam, qual, 124l); d.setCellVisibility(new CellVisibility(PRIVATE )); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithSLGStack.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithSLGStack.java index 371d25a58b1..5abfecc0d65 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithSLGStack.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithSLGStack.java @@ -82,9 +82,7 @@ public class TestVisibilityLabelsWithSLGStack { @Test public void testWithSAGStack() throws Exception { TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - Table table = null; - try { - table = TEST_UTIL.createTable(tableName, CF); + try (Table table = TEST_UTIL.createTable(tableName, CF)) { Put put = new Put(ROW_1); put.add(CF, Q1, HConstants.LATEST_TIMESTAMP, value); put.setCellVisibility(new CellVisibility(SECRET)); @@ -101,10 +99,6 @@ public class TestVisibilityLabelsWithSLGStack { Result next = scanner.next(); assertNotNull(next.getColumnLatestCell(CF, Q1)); assertNull(next.getColumnLatestCell(CF, Q2)); - } finally { - if (table != null) { - table.close(); - } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java index 9e122c9abb3..f0881fdc9a2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java @@ -112,12 +112,13 @@ public class TestVisibilityLablesWithGroups { @Test public void testGroupAuths() throws Exception { final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); - - // create the table and put data. + // create the table + TEST_UTIL.createTable(tableName, CF); + // put the data. SUPERUSER.runAs(new PrivilegedExceptionAction() { public Void run() throws Exception { - Table table = TEST_UTIL.createTable(tableName, CF); - try { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Put put = new Put(ROW_1); put.add(CF, Q1, HConstants.LATEST_TIMESTAMP, value1); put.setCellVisibility(new CellVisibility(SECRET)); @@ -129,8 +130,6 @@ public class TestVisibilityLablesWithGroups { put = new Put(ROW_1); put.add(CF, Q3, HConstants.LATEST_TIMESTAMP, value3); table.put(put); - } finally { - table.close(); } return null; } @@ -139,9 +138,8 @@ public class TestVisibilityLablesWithGroups { // 'admin' user is part of 'supergroup', thus can see all the cells. SUPERUSER.runAs(new PrivilegedExceptionAction() { public Void run() throws Exception { - Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName); - try { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); Result[] next = scanner.next(1); @@ -167,10 +165,6 @@ public class TestVisibilityLablesWithGroups { current.getRowLength(), ROW_1, 0, ROW_1.length)); assertTrue(Bytes.equals(current.getQualifier(), Q3)); assertTrue(Bytes.equals(current.getValue(), value3)); - - } finally { - table.close(); - connection.close(); } return null; } @@ -198,9 +192,8 @@ public class TestVisibilityLablesWithGroups { // Test that test user can see what 'testgroup' has been authorized to. 
TESTUSER.runAs(new PrivilegedExceptionAction() { public Void run() throws Exception { - Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName); - try { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { // Test scan with no auth attribute Scan s = new Scan(); ResultScanner scanner = table.getScanner(s); @@ -265,9 +258,6 @@ public class TestVisibilityLablesWithGroups { assertTrue(Bytes.equals(current2.getValue(), value3)); assertFalse(cellScanner2.advance()); - } finally { - table.close(); - connection.close(); } return null; } @@ -307,9 +297,8 @@ public class TestVisibilityLablesWithGroups { // Test that test user cannot see the cells with the labels anymore. TESTUSER.runAs(new PrivilegedExceptionAction() { public Void run() throws Exception { - Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName); - try { + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Scan s1 = new Scan(); // test user is not entitled to 'CONFIDENTIAL' anymore since we dropped // testgroup's label. test user has no auth labels now. @@ -329,9 +318,6 @@ public class TestVisibilityLablesWithGroups { assertTrue(Bytes.equals(current1.getValue(), value3)); assertFalse(cellScanner1.advance()); - } finally { - table.close(); - connection.close(); } return null; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityWithCheckAuths.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityWithCheckAuths.java index 457d2eb3e20..67d9c63ef0f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityWithCheckAuths.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityWithCheckAuths.java @@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse; @@ -129,15 +128,13 @@ public class TestVisibilityWithCheckAuths { HTableDescriptor desc = new HTableDescriptor(tableName); desc.addFamily(colDesc); hBaseAdmin.createTable(desc); - Table table = null; try { TEST_UTIL.getHBaseAdmin().flush(tableName); PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - Table table = null; - try { - table = TEST_UTIL.getConnection().getTable(tableName); + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Put p = new Put(row1); p.setCellVisibility(new CellVisibility(PUBLIC + "&" + TOPSECRET)); p.add(fam, qual, 125l, value); @@ -145,8 +142,6 @@ public class TestVisibilityWithCheckAuths { Assert.fail("Testcase should fail with AccesDeniedException"); } catch (Throwable t) { assertTrue(t.getMessage().contains("AccessDeniedException")); - } finally { - table.close(); } return null; } @@ -173,25 +168,18 @@ public class TestVisibilityWithCheckAuths { }; SUPERUSER.runAs(action); final TableName tableName = 
TableName.valueOf(TEST_NAME.getMethodName()); - Table table = null; - try { - table = TEST_UTIL.createTable(tableName, fam); + try (Table table = TEST_UTIL.createTable(tableName, fam)) { final byte[] row1 = Bytes.toBytes("row1"); final byte[] val = Bytes.toBytes("a"); PrivilegedExceptionAction actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - Connection connection = ConnectionFactory.createConnection(conf); - Table table = null; - try { - table = connection.getTable(tableName); + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Put put = new Put(row1); put.add(fam, qual, HConstants.LATEST_TIMESTAMP, val); put.setCellVisibility(new CellVisibility(TOPSECRET)); table.put(put); - } finally { - table.close(); - connection.close(); } return null; } @@ -200,16 +188,11 @@ public class TestVisibilityWithCheckAuths { actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - Connection connection = ConnectionFactory.createConnection(conf); - Table table = null; - try { - table = TEST_UTIL.getConnection().getTable(tableName); + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Append append = new Append(row1); append.add(fam, qual, Bytes.toBytes("b")); table.append(append); - } finally { - table.close(); - connection.close(); } return null; } @@ -218,11 +201,8 @@ public class TestVisibilityWithCheckAuths { actiona = new PrivilegedExceptionAction() { @Override public Void run() throws Exception { - Table table = null; - Connection connection = null; - try { - connection = ConnectionFactory.createConnection(conf); - table = connection.getTable(tableName); + try (Connection connection = ConnectionFactory.createConnection(conf); + Table table = connection.getTable(tableName)) { Append append = new Append(row1); append.add(fam, qual, Bytes.toBytes("c")); append.setCellVisibility(new CellVisibility(PUBLIC)); @@ -230,22 +210,11 @@ public class TestVisibilityWithCheckAuths { Assert.fail("Testcase should fail with AccesDeniedException"); } catch (Throwable t) { assertTrue(t.getMessage().contains("AccessDeniedException")); - } finally { - if (table != null) { - table.close(); - } - if (connection != null) { - connection.close(); - } } return null; } }; USER.runAs(actiona); - } finally { - if (table != null) { - table.close(); - } } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java new file mode 100644 index 00000000000..d5e83de5aa2 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java @@ -0,0 +1,237 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.security.visibility; + +import static org.apache.hadoop.hbase.security.visibility.VisibilityConstants.LABELS_TABLE_NAME; +import static org.junit.Assert.*; + +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.GetAuthsResponse; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.access.SecureTestUtil; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.SecurityTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; + +import com.google.protobuf.ByteString; + +@Category({SecurityTests.class, LargeTests.class}) +public class TestWithDisabledAuthorization { + + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + + private static final String CONFIDENTIAL = "confidential"; + private static final String SECRET = "secret"; + private static final String PRIVATE = "private"; + private static final byte[] TEST_FAMILY = Bytes.toBytes("test"); + private static final byte[] TEST_QUALIFIER = Bytes.toBytes("q"); + private static final byte[] ZERO = Bytes.toBytes(0L); + + + @Rule + public final TestName TEST_NAME = new TestName(); + + private static User SUPERUSER; + private static User USER_RW; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + Configuration conf = TEST_UTIL.getConfiguration(); + + // Set up superuser + SecureTestUtil.configureSuperuser(conf); + + // Install the VisibilityController as a system processor + VisibilityTestUtil.enableVisiblityLabels(conf); + + // Now, DISABLE active authorization + conf.setBoolean(User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY, false); + + TEST_UTIL.startMiniCluster(); + + // Wait for the labels table to become available + TEST_UTIL.waitUntilAllRegionsAssigned(LABELS_TABLE_NAME); + + // create a set of test users + SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" }); + USER_RW = User.createUserForTesting(conf, "rwuser", new String[0]); + + // Define test labels + SUPERUSER.runAs(new PrivilegedExceptionAction() { + public Void run() throws Exception { + try { + VisibilityClient.addLabels(TEST_UTIL.getConfiguration(), + new String[] { SECRET, CONFIDENTIAL, PRIVATE }); + VisibilityClient.setAuths(TEST_UTIL.getConfiguration(), + new String[] { SECRET, CONFIDENTIAL }, + USER_RW.getShortName()); + } catch 
(Throwable t) { + fail("Should not have failed"); + } + return null; + } + }); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testManageUserAuths() throws Throwable { + // Even though authorization is disabled, we should be able to manage user auths + + SUPERUSER.runAs(new PrivilegedExceptionAction() { + public Void run() throws Exception { + try { + VisibilityClient.setAuths(TEST_UTIL.getConfiguration(), + new String[] { SECRET, CONFIDENTIAL }, + USER_RW.getShortName()); + } catch (Throwable t) { + fail("Should not have failed"); + } + return null; + } + }); + + PrivilegedExceptionAction> getAuths = + new PrivilegedExceptionAction>() { + public List run() throws Exception { + GetAuthsResponse authsResponse = null; + try { + authsResponse = VisibilityClient.getAuths(TEST_UTIL.getConfiguration(), + USER_RW.getShortName()); + } catch (Throwable t) { + fail("Should not have failed"); + } + List authsList = new ArrayList(); + for (ByteString authBS : authsResponse.getAuthList()) { + authsList.add(Bytes.toString(authBS.toByteArray())); + } + return authsList; + } + }; + + List authsList = SUPERUSER.runAs(getAuths); + assertEquals(2, authsList.size()); + assertTrue(authsList.contains(SECRET)); + assertTrue(authsList.contains(CONFIDENTIAL)); + + SUPERUSER.runAs(new PrivilegedExceptionAction() { + public Void run() throws Exception { + try { + VisibilityClient.clearAuths(TEST_UTIL.getConfiguration(), + new String[] { SECRET }, + USER_RW.getShortName()); + } catch (Throwable t) { + fail("Should not have failed"); + } + return null; + } + }); + + authsList = SUPERUSER.runAs(getAuths); + assertEquals(1, authsList.size()); + assertTrue(authsList.contains(CONFIDENTIAL)); + + SUPERUSER.runAs(new PrivilegedExceptionAction() { + public Void run() throws Exception { + try { + VisibilityClient.clearAuths(TEST_UTIL.getConfiguration(), + new String[] { CONFIDENTIAL }, + USER_RW.getShortName()); + } catch (Throwable t) { + fail("Should not have failed"); + } + return null; + } + }); + + authsList = SUPERUSER.runAs(getAuths); + assertEquals(0, authsList.size()); + } + + @Test + public void testPassiveVisibility() throws Exception { + // No values should be filtered regardless of authorization if we are passive + try (Table t = createTableAndWriteDataWithLabels( + TableName.valueOf(TEST_NAME.getMethodName()), + SECRET, + PRIVATE, + SECRET + "|" + CONFIDENTIAL, + PRIVATE + "|" + CONFIDENTIAL)) { + Scan s = new Scan(); + s.setAuthorizations(new Authorizations()); + try (ResultScanner scanner = t.getScanner(s)) { + Result[] next = scanner.next(10); + assertEquals(next.length, 4); + } + s = new Scan(); + s.setAuthorizations(new Authorizations(SECRET)); + try (ResultScanner scanner = t.getScanner(s)) { + Result[] next = scanner.next(10); + assertEquals(next.length, 4); + } + s = new Scan(); + s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL)); + try (ResultScanner scanner = t.getScanner(s)) { + Result[] next = scanner.next(10); + assertEquals(next.length, 4); + } + s = new Scan(); + s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL, PRIVATE)); + try (ResultScanner scanner = t.getScanner(s)) { + Result[] next = scanner.next(10); + assertEquals(next.length, 4); + } + } + } + + static Table createTableAndWriteDataWithLabels(TableName tableName, String... 
labelExps) + throws Exception { + List puts = new ArrayList(); + for (int i = 0; i < labelExps.length; i++) { + Put put = new Put(Bytes.toBytes("row" + (i+1))); + put.addColumn(TEST_FAMILY, TEST_QUALIFIER, HConstants.LATEST_TIMESTAMP, ZERO); + put.setCellVisibility(new CellVisibility(labelExps[i])); + puts.add(put); + } + Table table = TEST_UTIL.createTable(tableName, TEST_FAMILY); + table.put(puts); + return table; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java index b7af75ef131..d891c20fde1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/MobSnapshotTestingUtils.java @@ -28,10 +28,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.*; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.*; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.regionserver.BloomType; @@ -73,7 +70,7 @@ public class MobSnapshotTestingUtils { * @return An HTable instance for the created table. * @throws IOException */ - public static HTable createMobTable(final HBaseTestingUtility util, + public static Table createMobTable(final HBaseTestingUtility util, final TableName tableName, final byte[]... families) throws IOException { HTableDescriptor htd = new HTableDescriptor(tableName); for (byte[] family : families) { @@ -92,13 +89,13 @@ public class MobSnapshotTestingUtils { // HBaseAdmin only waits for regions to appear in hbase:meta we should wait // until they are assigned util.waitUntilAllRegionsAssigned(htd.getTableName()); - return new HTable(util.getConfiguration(), htd.getTableName()); + return ConnectionFactory.createConnection(util.getConfiguration()).getTable(htd.getTableName()); } /** * Return the number of rows in the given table. */ - public static int countMobRows(final HTable table) throws IOException { + public static int countMobRows(final Table table) throws IOException { Scan scan = new Scan(); ResultScanner results = table.getScanner(scan); int count = 0; @@ -117,7 +114,7 @@ public class MobSnapshotTestingUtils { /** * Return the number of rows in the given table. */ - public static int countMobRows(final HTable table, final byte[]... families) + public static int countMobRows(final Table table, final byte[]... 
families) throws IOException { Scan scan = new Scan(); for (byte[] family : families) { @@ -139,7 +136,8 @@ public class MobSnapshotTestingUtils { public static void verifyMobRowCount(final HBaseTestingUtility util, final TableName tableName, long expectedRows) throws IOException { - HTable table = new HTable(util.getConfiguration(), tableName); + + Table table = ConnectionFactory.createConnection(util.getConfiguration()).getTable(tableName); try { assertEquals(expectedRows, countMobRows(table)); } finally { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java index 873b7dcd262..9caf9055368 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java @@ -63,9 +63,9 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptio import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSVisitor; @@ -637,8 +637,8 @@ public class SnapshotTestingUtils { final TableName tableName) throws IOException, InterruptedException { HRegionServer rs = util.getRSForFirstRegionInTable(tableName); - List onlineRegions = rs.getOnlineRegions(tableName); - for (HRegion region : onlineRegions) { + List onlineRegions = rs.getOnlineRegions(tableName); + for (Region region : onlineRegions) { region.waitForFlushesAndCompactions(); } // Wait up to 60 seconds for a table to be available. 
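The recurring change in the hunks above and below is connection management: tests stop constructing HTable directly (new HTable(conf, tableName)) or borrowing TEST_UTIL's shared connection, and instead obtain a Connection from ConnectionFactory and a Table from it inside try-with-resources. The following is a minimal standalone sketch of that client pattern, not part of the patch itself, assuming a cluster reachable through hbase-site.xml and a hypothetical table cluster_test with column family test_cf (both names are placeholders taken from the load-test defaults later in this patch).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionPatternSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();        // picks up hbase-site.xml from the classpath
    TableName tableName = TableName.valueOf("cluster_test"); // placeholder table name
    byte[] family = Bytes.toBytes("test_cf");                // placeholder column family

    // One heavyweight Connection and one lightweight Table handle, both closed automatically.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(tableName)) {
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("value"));
      table.put(put);

      // ResultScanner is Closeable as well; count the cells visible to this client.
      int cells = 0;
      try (ResultScanner scanner = table.getScanner(new Scan())) {
        for (Result r : scanner) {
          cells += r.size();
        }
      }
      System.out.println("cells seen: " + cells);
    }
  }
}

Connection is the heavyweight, thread-safe object meant to be long-lived; Table and ResultScanner are cheap per-use handles that should be closed promptly, which is why the try-with-resources form replaces the old finally { table.close(); connection.close(); } blocks throughout these tests.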
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java index 5517f4a4591..f7a9918ad45 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobFlushSnapshotFromClient.java @@ -38,10 +38,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.ScannerCallable; -import org.apache.hadoop.hbase.ipc.RpcClient; +import org.apache.hadoop.hbase.client.*; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; @@ -142,7 +139,7 @@ public class TestMobFlushSnapshotFromClient { SnapshotTestingUtils.assertNoSnapshots(admin); // put some stuff in the table - HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME); + Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME); SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM); LOG.debug("FS state before snapshot:"); @@ -181,7 +178,7 @@ public class TestMobFlushSnapshotFromClient { SnapshotTestingUtils.assertNoSnapshots(admin); // put some stuff in the table - HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME); + Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME); UTIL.loadTable(table, TEST_FAM); LOG.debug("FS state before snapshot:"); @@ -225,7 +222,7 @@ public class TestMobFlushSnapshotFromClient { SnapshotTestingUtils.assertNoSnapshots(admin); // put some stuff in the table - HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME); + Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME); SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM); LOG.debug("FS state before snapshot:"); @@ -425,7 +422,7 @@ public class TestMobFlushSnapshotFromClient { /** * Demonstrate that we reject snapshot requests if there is a snapshot already running on the * same table currently running and that concurrent snapshots on different tables can both - * succeed concurretly. + * succeed concurrently. 
*/ @Test(timeout=300000) public void testConcurrentSnapshottingAttempts() throws IOException, InterruptedException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreFlushSnapshotFromClient.java index d281763e357..cb58b178971 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreFlushSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreFlushSnapshotFromClient.java @@ -25,7 +25,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; -import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.mob.MobConstants; @@ -106,7 +107,7 @@ public class TestMobRestoreFlushSnapshotFromClient { // create Table MobSnapshotTestingUtils.createMobTable(UTIL, tableName, 1, FAMILY); - HTable table = new HTable(UTIL.getConfiguration(), tableName); + Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(tableName); SnapshotTestingUtils.loadData(UTIL, tableName, 500, FAMILY); snapshot0Rows = MobSnapshotTestingUtils.countMobRows(table); LOG.info("=== before snapshot with 500 rows"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java index 1893c7aa5bc..70b4312fda9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java @@ -28,7 +28,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; @@ -54,17 +53,14 @@ public class TestMobRestoreSnapshotHelper { final Log LOG = LogFactory.getLog(getClass()); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private final static String TEST_HFILE = "abc"; private Configuration conf; - private Path archiveDir; private FileSystem fs; private Path rootDir; @Before public void setup() throws Exception { rootDir = TEST_UTIL.getDataTestDir("testRestore"); - archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY); fs = TEST_UTIL.getTestFileSystem(); TEST_UTIL.getConfiguration().setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0); conf = TEST_UTIL.getConfiguration(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotClientRetries.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotClientRetries.java new file mode 100644 index 00000000000..5168b855b2f --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotClientRetries.java @@ -0,0 +1,125 @@ +/** + * + * Licensed to the Apache Software Foundation 
(ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.snapshot; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.snapshot.SnapshotExistsException; +import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.TestTableName; + +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({ MediumTests.class }) +public class TestSnapshotClientRetries { + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final Log LOG = LogFactory.getLog(TestSnapshotClientRetries.class); + + @Rule public TestTableName TEST_TABLE = new TestTableName(); + + @Before + public void setUp() throws Exception { + TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + MasterSyncObserver.class.getName()); + TEST_UTIL.startMiniCluster(1); + } + + @After + public void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test(timeout = 60000, expected=SnapshotExistsException.class) + public void testSnapshotAlreadyExist() throws Exception { + final String snapshotName = "testSnapshotAlreadyExist"; + TEST_UTIL.createTable(TEST_TABLE.getTableName(), "f"); + TEST_UTIL.getHBaseAdmin().snapshot(snapshotName, TEST_TABLE.getTableName()); + snapshotAndAssertOneRetry(snapshotName, TEST_TABLE.getTableName()); + } + + @Test(timeout = 60000, expected=SnapshotDoesNotExistException.class) + public void testCloneNonExistentSnapshot() throws Exception { + final String snapshotName = "testCloneNonExistentSnapshot"; + cloneAndAssertOneRetry(snapshotName, TEST_TABLE.getTableName()); + } + + public static class MasterSyncObserver extends BaseMasterObserver { + volatile AtomicInteger snapshotCount = null; + volatile AtomicInteger cloneCount = null; + + @Override + public void preSnapshot(final ObserverContext ctx, + final SnapshotDescription snapshot, 
final HTableDescriptor hTableDescriptor) + throws IOException { + if (snapshotCount != null) { + snapshotCount.incrementAndGet(); + } + } + + @Override + public void preCloneSnapshot(final ObserverContext ctx, + final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor) + throws IOException { + if (cloneCount != null) { + cloneCount.incrementAndGet(); + } + } + } + + public void snapshotAndAssertOneRetry(final String snapshotName, final TableName tableName) + throws Exception { + MasterSyncObserver observer = getMasterSyncObserver(); + observer.snapshotCount = new AtomicInteger(0); + TEST_UTIL.getHBaseAdmin().snapshot(snapshotName, tableName); + assertEquals(1, observer.snapshotCount.get()); + } + + public void cloneAndAssertOneRetry(final String snapshotName, final TableName tableName) + throws Exception { + MasterSyncObserver observer = getMasterSyncObserver(); + observer.cloneCount = new AtomicInteger(0); + TEST_UTIL.getHBaseAdmin().cloneSnapshot(snapshotName, tableName); + assertEquals(1, observer.cloneCount.get()); + } + + private MasterSyncObserver getMasterSyncObserver() { + return (MasterSyncObserver)TEST_UTIL.getHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ConstantDelayQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ConstantDelayQueue.java new file mode 100644 index 00000000000..73ce71adc65 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/ConstantDelayQueue.java @@ -0,0 +1,196 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.util; + +import java.util.Collection; +import java.util.Iterator; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.DelayQueue; +import java.util.concurrent.Delayed; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +/** + * A blocking queue implementation for adding a constant delay. Uses a DelayQueue as a backing store + * @param type of elements + */ +@InterfaceAudience.Private +public class ConstantDelayQueue implements BlockingQueue { + + private static final class DelayedElement implements Delayed { + T element; + long end; + public DelayedElement(T element, long delayMs) { + this.element = element; + this.end = EnvironmentEdgeManager.currentTime() + delayMs; + } + + @Override + public int compareTo(Delayed o) { + long cmp = getDelay(TimeUnit.MILLISECONDS) - o.getDelay(TimeUnit.MILLISECONDS); + return cmp == 0 ? 0 : ( cmp < 0 ? 
-1 : 1); + } + + @Override + public long getDelay(TimeUnit unit) { + return unit.convert(end - System.currentTimeMillis(), TimeUnit.MILLISECONDS); + } + } + + private final long delayMs; + + // backing DelayQueue + private DelayQueue> queue = new DelayQueue>(); + + public ConstantDelayQueue(TimeUnit timeUnit, long delay) { + this.delayMs = TimeUnit.MILLISECONDS.convert(delay, timeUnit); + } + + @Override + public E remove() { + DelayedElement el = queue.remove(); + return el == null ? null : el.element; + } + + @Override + public E poll() { + DelayedElement el = queue.poll(); + return el == null ? null : el.element; + } + + @Override + public E element() { + DelayedElement el = queue.element(); + return el == null ? null : el.element; + } + + @Override + public E peek() { + DelayedElement el = queue.peek(); + return el == null ? null : el.element; + } + + @Override + public int size() { + return queue.size(); + } + + @Override + public boolean isEmpty() { + return queue.isEmpty(); + } + + @Override + public Iterator iterator() { + throw new UnsupportedOperationException(); // not implemented yet + } + + @Override + public Object[] toArray() { + throw new UnsupportedOperationException(); // not implemented yet + } + + @Override + public T[] toArray(T[] a) { + throw new UnsupportedOperationException(); // not implemented yet + } + + @Override + public boolean containsAll(Collection c) { + throw new UnsupportedOperationException(); // not implemented yet + } + + @Override + public boolean addAll(Collection c) { + throw new UnsupportedOperationException(); // not implemented yet + } + + @Override + public boolean removeAll(Collection c) { + throw new UnsupportedOperationException(); // not implemented yet + } + + @Override + public boolean retainAll(Collection c) { + throw new UnsupportedOperationException(); // not implemented yet + } + + @Override + public void clear() { + queue.clear(); + } + + @Override + public boolean add(E e) { + return queue.add(new DelayedElement(e, delayMs)); + } + + @Override + public boolean offer(E e) { + return queue.offer(new DelayedElement(e, delayMs)); + } + + @Override + public void put(E e) throws InterruptedException { + queue.put(new DelayedElement(e, delayMs)); + } + + @Override + public boolean offer(E e, long timeout, TimeUnit unit) throws InterruptedException { + return queue.offer(new DelayedElement(e, delayMs), timeout, unit); + } + + @Override + public E take() throws InterruptedException { + DelayedElement el = queue.take(); + return el == null ? null : el.element; + } + + @Override + public E poll(long timeout, TimeUnit unit) throws InterruptedException { + DelayedElement el = queue.poll(timeout, unit); + return el == null ? 
null : el.element; + } + + @Override + public int remainingCapacity() { + return queue.remainingCapacity(); + } + + @Override + public boolean remove(Object o) { + throw new UnsupportedOperationException(); // not implemented yet + } + + @Override + public boolean contains(Object o) { + throw new UnsupportedOperationException(); // not implemented yet + } + + @Override + public int drainTo(Collection c) { + throw new UnsupportedOperationException(); // not implemented yet + } + + @Override + public int drainTo(Collection c, int maxElements) { + throw new UnsupportedOperationException(); // not implemented yet + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithMOB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithMOB.java index 6ce4252474f..006316ac1d2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithMOB.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestDataGeneratorWithMOB.java @@ -46,7 +46,7 @@ public class LoadTestDataGeneratorWithMOB if (args.length != 3) { throw new IllegalArgumentException( "LoadTestDataGeneratorWithMOB can have 3 arguments." - + "1st arguement is a column family, the 2nd argument " + + "1st argument is a column family, the 2nd argument " + "is the minimum mob data size and the 3rd argument " + "is the maximum mob data size."); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java index 33efe860b69..0a8a594527b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.io.compress.Compression; @@ -74,14 +75,17 @@ public class LoadTestTool extends AbstractHBaseTool { /** Table name for the test */ private TableName tableName; + /** Column families for the test */ + private byte[][] families; + /** Table name to use of not overridden on the command line */ protected static final String DEFAULT_TABLE_NAME = "cluster_test"; /** Column family used by the test */ - public static byte[] COLUMN_FAMILY = Bytes.toBytes("test_cf"); + public static byte[] DEFAULT_COLUMN_FAMILY = Bytes.toBytes("test_cf"); /** Column families used by the test */ - protected static final byte[][] COLUMN_FAMILIES = { COLUMN_FAMILY }; + public static final byte[][] DEFAULT_COLUMN_FAMILIES = { DEFAULT_COLUMN_FAMILY }; /** The default data size if not specified */ protected static final int DEFAULT_DATA_SIZE = 64; @@ -130,18 +134,25 @@ public class LoadTestTool extends AbstractHBaseTool { public static final String OPT_GENERATOR_USAGE = "The class which generates load for the tool." 
+ " Any args for this class can be passed as colon separated after class name"; + public static final String OPT_WRITER = "writer"; + public static final String OPT_WRITER_USAGE = "The class for executing the write requests"; + + public static final String OPT_UPDATER = "updater"; + public static final String OPT_UPDATER_USAGE = "The class for executing the update requests"; + public static final String OPT_READER = "reader"; public static final String OPT_READER_USAGE = "The class for executing the read requests"; protected static final String OPT_KEY_WINDOW = "key_window"; protected static final String OPT_WRITE = "write"; protected static final String OPT_MAX_READ_ERRORS = "max_read_errors"; - protected static final String OPT_MULTIPUT = "multiput"; + public static final String OPT_MULTIPUT = "multiput"; public static final String OPT_MULTIGET = "multiget_batchsize"; protected static final String OPT_NUM_KEYS = "num_keys"; protected static final String OPT_READ = "read"; protected static final String OPT_START_KEY = "start_key"; public static final String OPT_TABLE_NAME = "tn"; + public static final String OPT_COLUMN_FAMILIES = "families"; protected static final String OPT_ZK_QUORUM = "zk"; protected static final String OPT_ZK_PARENT_NODE = "zk_root"; protected static final String OPT_SKIP_INIT = "skip_init"; @@ -251,6 +262,10 @@ public class LoadTestTool extends AbstractHBaseTool { return parseInt(numThreadsStr, 1, Short.MAX_VALUE); } + public byte[][] getColumnFamilies() { + return families; + } + /** * Apply column family options such as Bloom filters, compression, and data * block encoding. @@ -308,6 +323,7 @@ public class LoadTestTool extends AbstractHBaseTool { "without port numbers"); addOptWithArg(OPT_ZK_PARENT_NODE, "name of parent znode in zookeeper"); addOptWithArg(OPT_TABLE_NAME, "The name of the table to read or write"); + addOptWithArg(OPT_COLUMN_FAMILIES, "The name of the column families to use separated by comma"); addOptWithArg(OPT_WRITE, OPT_USAGE_LOAD); addOptWithArg(OPT_READ, OPT_USAGE_READ); addOptWithArg(OPT_UPDATE, OPT_USAGE_UPDATE); @@ -330,6 +346,8 @@ public class LoadTestTool extends AbstractHBaseTool { "separate updates for every column in a row"); addOptNoArg(OPT_INMEMORY, OPT_USAGE_IN_MEMORY); addOptWithArg(OPT_GENERATOR, OPT_GENERATOR_USAGE); + addOptWithArg(OPT_WRITER, OPT_WRITER_USAGE); + addOptWithArg(OPT_UPDATER, OPT_UPDATER_USAGE); addOptWithArg(OPT_READER, OPT_READER_USAGE); addOptWithArg(OPT_NUM_KEYS, "The number of keys to read/write"); @@ -363,6 +381,16 @@ public class LoadTestTool extends AbstractHBaseTool { tableName = TableName.valueOf(cmd.getOptionValue(OPT_TABLE_NAME, DEFAULT_TABLE_NAME)); + if (cmd.hasOption(OPT_COLUMN_FAMILIES)) { + String[] list = cmd.getOptionValue(OPT_COLUMN_FAMILIES).split(","); + families = new byte[list.length][]; + for (int i = 0; i < list.length; i++) { + families[i] = Bytes.toBytes(list[i]); + } + } else { + families = DEFAULT_COLUMN_FAMILIES; + } + isWrite = cmd.hasOption(OPT_WRITE); isRead = cmd.hasOption(OPT_READ); isUpdate = cmd.hasOption(OPT_UPDATE); @@ -519,9 +547,9 @@ public class LoadTestTool extends AbstractHBaseTool { } HBaseTestingUtility.createPreSplitLoadTestTable(conf, tableName, - COLUMN_FAMILY, compressAlgo, dataBlockEncodingAlgo, numRegionsPerServer, + getColumnFamilies(), compressAlgo, dataBlockEncodingAlgo, numRegionsPerServer, regionReplication, durability); - applyColumnFamilyOptions(tableName, COLUMN_FAMILIES); + applyColumnFamilyOptions(tableName, getColumnFamilies()); } @Override @@ -586,7 
+614,7 @@ public class LoadTestTool extends AbstractHBaseTool { } else { // Default DataGenerator is MultiThreadedAction.DefaultDataGenerator dataGen = new MultiThreadedAction.DefaultDataGenerator(minColDataSize, maxColDataSize, - minColsPerKey, maxColsPerKey, COLUMN_FAMILY); + minColsPerKey, maxColsPerKey, families); } if (userOwner != null) { @@ -595,7 +623,8 @@ public class LoadTestTool extends AbstractHBaseTool { Permission.Action.ADMIN, Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE }; try { - AccessControlClient.grant(conf, tableName, userOwner.getShortName(), null, null, actions); + AccessControlClient.grant(ConnectionFactory.createConnection(conf), + tableName, userOwner.getShortName(), null, null, actions); } catch (Throwable e) { LOG.fatal("Error in granting permission for the user " + userOwner.getShortName(), e); return EXIT_FAILURE; @@ -619,7 +648,14 @@ public class LoadTestTool extends AbstractHBaseTool { if (userOwner != null) { writerThreads = new MultiThreadedWriterWithACL(dataGen, conf, tableName, userOwner); } else { - writerThreads = new MultiThreadedWriter(dataGen, conf, tableName); + String writerClass = null; + if (cmd.hasOption(OPT_WRITER)) { + writerClass = cmd.getOptionValue(OPT_WRITER); + } else { + writerClass = MultiThreadedWriter.class.getCanonicalName(); + } + + writerThreads = getMultiThreadedWriterInstance(writerClass, dataGen); } writerThreads.setMultiPut(isMultiPut); } @@ -629,7 +665,13 @@ public class LoadTestTool extends AbstractHBaseTool { updaterThreads = new MultiThreadedUpdaterWithACL(dataGen, conf, tableName, updatePercent, userOwner, userNames); } else { - updaterThreads = new MultiThreadedUpdater(dataGen, conf, tableName, updatePercent); + String updaterClass = null; + if (cmd.hasOption(OPT_UPDATER)) { + updaterClass = cmd.getOptionValue(OPT_UPDATER); + } else { + updaterClass = MultiThreadedUpdater.class.getCanonicalName(); + } + updaterThreads = getMultiThreadedUpdaterInstance(updaterClass, dataGen); } updaterThreads.setBatchUpdate(isBatchUpdate); updaterThreads.setIgnoreNonceConflicts(ignoreConflicts); @@ -716,7 +758,32 @@ public class LoadTestTool extends AbstractHBaseTool { Constructor constructor = clazz.getConstructor(int.class, int.class, int.class, int.class, byte[][].class); return (LoadTestDataGenerator) constructor.newInstance(minColDataSize, maxColDataSize, - minColsPerKey, maxColsPerKey, COLUMN_FAMILIES); + minColsPerKey, maxColsPerKey, families); + } catch (Exception e) { + throw new IOException(e); + } + } + + private MultiThreadedWriter getMultiThreadedWriterInstance(String clazzName + , LoadTestDataGenerator dataGen) throws IOException { + try { + Class clazz = Class.forName(clazzName); + Constructor constructor = clazz.getConstructor( + LoadTestDataGenerator.class, Configuration.class, TableName.class); + return (MultiThreadedWriter) constructor.newInstance(dataGen, conf, tableName); + } catch (Exception e) { + throw new IOException(e); + } + } + + private MultiThreadedUpdater getMultiThreadedUpdaterInstance(String clazzName + , LoadTestDataGenerator dataGen) throws IOException { + try { + Class clazz = Class.forName(clazzName); + Constructor constructor = clazz.getConstructor( + LoadTestDataGenerator.class, Configuration.class, TableName.class, double.class); + return (MultiThreadedUpdater) constructor.newInstance( + dataGen, conf, tableName, updatePercent); } catch (Exception e) { throw new IOException(e); } diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java index 5b04ab98aec..26f2db9e9a4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java @@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; @@ -155,7 +155,7 @@ public abstract class MultiThreadedAction { this.dataGenerator = dataGen; this.tableName = tableName; this.actionLetter = actionLetter; - this.connection = HConnectionManager.createConnection(conf); + this.connection = (HConnection) ConnectionFactory.createConnection(conf); } public void start(long startKey, long endKey, int numThreads) throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java index 9eb0c930765..d4e6d805d98 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedWriterBase.java @@ -46,7 +46,7 @@ public abstract class MultiThreadedWriterBase extends MultiThreadedAction { * {@link #wroteUpToKey}, the maximum key in the contiguous range of keys * being inserted/updated. This queue is supposed to stay small. */ - protected BlockingQueue wroteKeys = new ArrayBlockingQueue(10000); + protected BlockingQueue wroteKeys; /** * This is the current key to be inserted/updated by any thread. 
Each thread does an @@ -75,6 +75,11 @@ public abstract class MultiThreadedWriterBase extends MultiThreadedAction { public MultiThreadedWriterBase(LoadTestDataGenerator dataGen, Configuration conf, TableName tableName, String actionLetter) throws IOException { super(dataGen, conf, tableName, actionLetter); + this.wroteKeys = createWriteKeysQueue(conf); + } + + protected BlockingQueue createWriteKeysQueue(Configuration conf) { + return new ArrayBlockingQueue(10000); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java index b0a17a91b99..6beb2e61655 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/RestartMetaTest.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; @@ -81,7 +80,8 @@ public class RestartMetaTest extends AbstractHBaseTool { // start the writers LoadTestDataGenerator dataGen = new MultiThreadedAction.DefaultDataGenerator( - minColDataSize, maxColDataSize, minColsPerKey, maxColsPerKey, LoadTestTool.COLUMN_FAMILY); + minColDataSize, maxColDataSize, minColsPerKey, maxColsPerKey, + LoadTestTool.DEFAULT_COLUMN_FAMILY); MultiThreadedWriter writer = new MultiThreadedWriter(dataGen, conf, TABLE_NAME); writer.setMultiPut(true); writer.start(startKey, endKey, numThreads); @@ -101,7 +101,7 @@ public class RestartMetaTest extends AbstractHBaseTool { // create tables if needed HBaseTestingUtility.createPreSplitLoadTestTable(conf, TABLE_NAME, - LoadTestTool.COLUMN_FAMILY, Compression.Algorithm.NONE, + LoadTestTool.DEFAULT_COLUMN_FAMILY, Compression.Algorithm.NONE, DataBlockEncoding.NONE); LOG.debug("Loading data....\n\n"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestEncryptionTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestEncryptionTest.java index d615a293205..5d2f04f543a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestEncryptionTest.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestEncryptionTest.java @@ -75,10 +75,12 @@ public class TestEncryptionTest { public void testTestCipher() { Configuration conf = HBaseConfiguration.create(); conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); + String algorithm = + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); try { - EncryptionTest.testEncryption(conf, "AES", null); + EncryptionTest.testEncryption(conf, algorithm, null); } catch (Exception e) { - fail("Test for cipher AES should have succeeded"); + fail("Test for cipher " + algorithm + " should have succeeded"); } try { EncryptionTest.testEncryption(conf, "foobar", null); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java index c8b2285cb4d..e2c14884b0d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java @@ -330,16 +330,19 @@ public class 
TestFSUtils { ManualEnvironmentEdge mockEnv = new ManualEnvironmentEdge(); mockEnv.setValue(expect); EnvironmentEdgeManager.injectEdge(mockEnv); + try { + String dstFile = UUID.randomUUID().toString(); + Path dst = new Path(testDir , dstFile); - String dstFile = UUID.randomUUID().toString(); - Path dst = new Path(testDir , dstFile); + assertTrue(FSUtils.renameAndSetModifyTime(fs, p, dst)); + assertFalse("The moved file should not be present", FSUtils.isExists(fs, p)); + assertTrue("The dst file should be present", FSUtils.isExists(fs, dst)); - assertTrue(FSUtils.renameAndSetModifyTime(fs, p, dst)); - assertFalse("The moved file should not be present", FSUtils.isExists(fs, p)); - assertTrue("The dst file should be present", FSUtils.isExists(fs, dst)); - - assertEquals(expect, fs.getFileStatus(dst).getModificationTime()); - cluster.shutdown(); + assertEquals(expect, fs.getFileStatus(dst).getModificationTime()); + cluster.shutdown(); + } finally { + EnvironmentEdgeManager.reset(); + } } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java index 0d3a94e25be..4a741a9819f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java @@ -78,13 +78,16 @@ import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.MetaScanner; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.io.hfile.TestHFile; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; @@ -97,8 +100,10 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.regionserver.SplitTransaction; +import org.apache.hadoop.hbase.regionserver.SplitTransactionFactory; +import org.apache.hadoop.hbase.regionserver.SplitTransactionImpl; import org.apache.hadoop.hbase.regionserver.TestEndToEndSplitTransaction; +import org.apache.hadoop.hbase.security.access.AccessControlClient; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter; @@ -112,6 +117,7 @@ import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.zookeeper.KeeperException; import org.junit.AfterClass; import org.junit.Assert; +import org.junit.Before; import org.junit.BeforeClass; import org.junit.Ignore; import org.junit.Test; @@ -150,6 +156,9 @@ public class TestHBaseFsck { @BeforeClass public static void setUpBeforeClass() throws Exception { + 
TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + MasterSyncObserver.class.getName()); + conf.setInt("hbase.regionserver.handler.count", 2); conf.setInt("hbase.regionserver.metahandler.count", 2); @@ -157,7 +166,7 @@ public class TestHBaseFsck { conf.setInt("hbase.hconnection.threads.max", 2 * POOL_SIZE); conf.setInt("hbase.hconnection.threads.core", POOL_SIZE); conf.setInt("hbase.hbck.close.timeout", 2 * REGION_ONLINE_TIMEOUT); - conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 2 * REGION_ONLINE_TIMEOUT); + conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 8 * REGION_ONLINE_TIMEOUT); TEST_UTIL.startMiniCluster(3); tableExecutorService = new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS, @@ -173,6 +182,9 @@ public class TestHBaseFsck { admin = connection.getAdmin(); admin.setBalancerRunning(false, true); + + TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME); + TEST_UTIL.waitUntilAllRegionsAssigned(TableName.NAMESPACE_TABLE_NAME); } @AfterClass @@ -183,11 +195,19 @@ public class TestHBaseFsck { TEST_UTIL.shutdownMiniCluster(); } + @Before + public void setUp() { + EnvironmentEdgeManager.reset(); + } + @Test (timeout=180000) public void testHBaseFsck() throws Exception { assertNoErrors(doFsck(conf, false)); TableName table = TableName.valueOf("tableBadMetaAssign"); - TEST_UTIL.createTable(table, FAM); + HTableDescriptor desc = new HTableDescriptor(table); + HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM)); + desc.addFamily(hcd); // If a table has no CF's it doesn't get checked + createTable(TEST_UTIL, desc, null); // We created 1 table, should be fine assertNoErrors(doFsck(conf, false)); @@ -406,7 +426,8 @@ public class TestHBaseFsck { desc.setRegionReplication(replicaCount); HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM)); desc.addFamily(hcd); // If a table has no CF's it doesn't get checked - admin.createTable(desc, SPLITS); + createTable(TEST_UTIL, desc, SPLITS); + tbl = (HTable) connection.getTable(tablename, tableExecutorService); List puts = new ArrayList(); for (byte[] row : ROWKEYS) { @@ -437,15 +458,14 @@ public class TestHBaseFsck { * @param tablename * @throws IOException */ - void cleanupTable(TableName tablename) throws IOException { + void cleanupTable(TableName tablename) throws Exception { if (tbl != null) { tbl.close(); tbl = null; } ((ClusterConnection) connection).clearRegionCache(); - TEST_UTIL.deleteTable(tablename); - + deleteTable(TEST_UTIL, tablename); } /** @@ -555,8 +575,10 @@ public class TestHBaseFsck { boolean fail = true; @Override public HBaseFsck call(){ + Configuration c = new Configuration(conf); + c.setInt("hbase.hbck.lockfile.attempts", 1); try{ - return doFsck(conf, false); + return doFsck(c, false); } catch(Exception e){ if (e.getMessage().contains("Duplicate hbck")) { fail = false; @@ -585,6 +607,40 @@ public class TestHBaseFsck { } } + /** + * This test makes sure that with 5 retries both parallel instances + * of hbck will be completed successfully. 
+ * + * @throws Exception + */ + @Test (timeout=180000) + public void testParallelWithRetriesHbck() throws Exception { + final ExecutorService service; + final Future hbck1,hbck2; + + class RunHbck implements Callable{ + + @Override + public HBaseFsck call() throws Exception { + return doFsck(conf, false); + } + } + service = Executors.newFixedThreadPool(2); + hbck1 = service.submit(new RunHbck()); + hbck2 = service.submit(new RunHbck()); + service.shutdown(); + //wait for 15 seconds, for both hbck calls finish + service.awaitTermination(15, TimeUnit.SECONDS); + HBaseFsck h1 = hbck1.get(); + HBaseFsck h2 = hbck2.get(); + // Both should be successful + assertNotNull(h1); + assertNotNull(h2); + assert(h1.getRetCode() >= 0); + assert(h2.getRetCode() >= 0); + + } + /** * This create and fixes a bad table with regions that have a duplicate * start key @@ -960,7 +1016,7 @@ public class TestHBaseFsck { // fix the problem. HBaseFsck fsck = new HBaseFsck(conf, hbfsckExecutorService); fsck.connect(); - fsck.setDisplayFullReport(); // i.e. -details + HBaseFsck.setDisplayFullReport(); // i.e. -details fsck.setTimeLag(0); fsck.setFixAssignments(true); fsck.setFixMeta(true); @@ -1183,8 +1239,9 @@ public class TestHBaseFsck { try { HTableDescriptor desc = new HTableDescriptor(table); desc.addFamily(new HColumnDescriptor(Bytes.toBytes("f"))); - admin.createTable(desc); - tbl = new HTable(cluster.getConfiguration(), desc.getTableName()); + createTable(TEST_UTIL, desc, null); + + tbl = (HTable) connection.getTable(desc.getTableName()); for (int i = 0; i < 5; i++) { Put p1 = new Put(("r" + i).getBytes()); p1.add(Bytes.toBytes("f"), "q1".getBytes(), "v".getBytes()); @@ -1192,10 +1249,12 @@ public class TestHBaseFsck { } admin.flush(desc.getTableName()); List regions = cluster.getRegions(desc.getTableName()); - int serverWith = cluster.getServerWith(regions.get(0).getRegionName()); + int serverWith = cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName()); HRegionServer regionServer = cluster.getRegionServer(serverWith); - cluster.getServerWith(regions.get(0).getRegionName()); - SplitTransaction st = new SplitTransaction(regions.get(0), Bytes.toBytes("r3")); + cluster.getServerWith(regions.get(0).getRegionInfo().getRegionName()); + SplitTransactionImpl st = (SplitTransactionImpl) + new SplitTransactionFactory(TEST_UTIL.getConfiguration()) + .create(regions.get(0), Bytes.toBytes("r3")); st.prepare(); st.stepsBeforePONR(regionServer, regionServer, false); AssignmentManager am = cluster.getMaster().getAssignmentManager(); @@ -1314,8 +1373,8 @@ public class TestHBaseFsck { setupTableWithRegionReplica(table, 2); assertEquals(ROWKEYS.length, countRows()); NavigableMap map = - MetaScanner.allTableRegions(TEST_UTIL.getConnection(), - tbl.getName()); + MetaTableAccessor.allTableRegions(TEST_UTIL.getConnection(), + tbl.getName()); int i = 0; // store the HRIs of the regions we will mess up for (Map.Entry m : map.entrySet()) { @@ -1347,7 +1406,7 @@ public class TestHBaseFsck { i = 0; HRegionInfo[] newHris = new HRegionInfo[2]; // get all table's regions from meta - map = MetaScanner.allTableRegions(TEST_UTIL.getConnection(), tbl.getName()); + map = MetaTableAccessor.allTableRegions(TEST_UTIL.getConnection(), tbl.getName()); // get the HRIs of the new regions (hbck created new regions for fixing the hdfs mess-up) for (Map.Entry m : map.entrySet()) { if (m.getKey().getStartKey().length > 0 && @@ -1632,7 +1691,7 @@ public class TestHBaseFsck { // fix lingering split parent hbck = new HBaseFsck(conf, 
hbfsckExecutorService); hbck.connect(); - hbck.setDisplayFullReport(); // i.e. -details + HBaseFsck.setDisplayFullReport(); // i.e. -details hbck.setTimeLag(0); hbck.setFixSplitParents(true); hbck.onlineHbck(); @@ -1885,7 +1944,7 @@ public class TestHBaseFsck { // verify that noHdfsChecking report the same errors HBaseFsck fsck = new HBaseFsck(conf, hbfsckExecutorService); fsck.connect(); - fsck.setDisplayFullReport(); // i.e. -details + HBaseFsck.setDisplayFullReport(); // i.e. -details fsck.setTimeLag(0); fsck.setCheckHdfs(false); fsck.onlineHbck(); @@ -1896,7 +1955,7 @@ public class TestHBaseFsck { // verify that fixAssignments works fine with noHdfsChecking fsck = new HBaseFsck(conf, hbfsckExecutorService); fsck.connect(); - fsck.setDisplayFullReport(); // i.e. -details + HBaseFsck.setDisplayFullReport(); // i.e. -details fsck.setTimeLag(0); fsck.setCheckHdfs(false); fsck.setFixAssignments(true); @@ -1938,7 +1997,7 @@ public class TestHBaseFsck { // verify that noHdfsChecking report the same errors HBaseFsck fsck = new HBaseFsck(conf, hbfsckExecutorService); fsck.connect(); - fsck.setDisplayFullReport(); // i.e. -details + HBaseFsck.setDisplayFullReport(); // i.e. -details fsck.setTimeLag(0); fsck.setCheckHdfs(false); fsck.onlineHbck(); @@ -1949,7 +2008,7 @@ public class TestHBaseFsck { // verify that fixMeta doesn't work with noHdfsChecking fsck = new HBaseFsck(conf, hbfsckExecutorService); fsck.connect(); - fsck.setDisplayFullReport(); // i.e. -details + HBaseFsck.setDisplayFullReport(); // i.e. -details fsck.setTimeLag(0); fsck.setCheckHdfs(false); fsck.setFixAssignments(true); @@ -2004,7 +2063,7 @@ public class TestHBaseFsck { // verify that noHdfsChecking can't detect ORPHAN_HDFS_REGION HBaseFsck fsck = new HBaseFsck(conf, hbfsckExecutorService); fsck.connect(); - fsck.setDisplayFullReport(); // i.e. -details + HBaseFsck.setDisplayFullReport(); // i.e. -details fsck.setTimeLag(0); fsck.setCheckHdfs(false); fsck.onlineHbck(); @@ -2015,7 +2074,7 @@ public class TestHBaseFsck { // verify that fixHdfsHoles doesn't work with noHdfsChecking fsck = new HBaseFsck(conf, hbfsckExecutorService); fsck.connect(); - fsck.setDisplayFullReport(); // i.e. -details + HBaseFsck.setDisplayFullReport(); // i.e. -details fsck.setTimeLag(0); fsck.setCheckHdfs(false); fsck.setFixHdfsHoles(true); @@ -2270,10 +2329,10 @@ public class TestHBaseFsck { // Mess it up by removing the RegionInfo for one region. 
final List deletes = new LinkedList(); Table meta = connection.getTable(TableName.META_TABLE_NAME, hbfsckExecutorService); - MetaScanner.metaScan(connection, new MetaScanner.MetaScannerVisitor() { + MetaTableAccessor.fullScanRegions(connection, new MetaTableAccessor.Visitor() { @Override - public boolean processRow(Result rowResult) throws IOException { + public boolean visit(Result rowResult) throws IOException { HRegionInfo hri = MetaTableAccessor.getHRegionInfo(rowResult); if (hri != null && !hri.getTable().isSystemTable()) { Delete delete = new Delete(rowResult.getRow()); @@ -2282,10 +2341,6 @@ public class TestHBaseFsck { } return true; } - - @Override - public void close() throws IOException { - } }); meta.delete(deletes); @@ -2418,11 +2473,12 @@ public class TestHBaseFsck { assertNoErrors(hbck); ServerName mockName = ServerName.valueOf("localhost", 60000, 1); + final TableName tableName = TableName.valueOf("foo"); // obtain one lock - final TableLockManager tableLockManager = TableLockManager.createTableLockManager(conf, TEST_UTIL.getZooKeeperWatcher(), mockName); - TableLock writeLock = tableLockManager.writeLock(TableName.valueOf("foo"), - "testCheckTableLocks"); + final TableLockManager tableLockManager = + TableLockManager.createTableLockManager(conf, TEST_UTIL.getZooKeeperWatcher(), mockName); + TableLock writeLock = tableLockManager.writeLock(tableName, "testCheckTableLocks"); writeLock.acquire(); hbck = doFsck(conf, false); assertNoErrors(hbck); // should not have expired, no problems @@ -2437,8 +2493,7 @@ public class TestHBaseFsck { new Thread() { @Override public void run() { - TableLock readLock = tableLockManager.writeLock(TableName.valueOf("foo"), - "testCheckTableLocks"); + TableLock readLock = tableLockManager.writeLock(tableName, "testCheckTableLocks"); try { latch.countDown(); readLock.acquire(); @@ -2472,10 +2527,10 @@ public class TestHBaseFsck { assertNoErrors(hbck); // ensure that locks are deleted - writeLock = tableLockManager.writeLock(TableName.valueOf("foo"), - "should acquire without blocking"); + writeLock = tableLockManager.writeLock(tableName, "should acquire without blocking"); writeLock.acquire(); // this should not block. writeLock.release(); // release for clean state + tableLockManager.tableDeleted(tableName); } @Test (timeout=180000) @@ -2541,7 +2596,7 @@ public class TestHBaseFsck { HTableDescriptor desc = new HTableDescriptor(table); HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM)); desc.addFamily(hcd); // If a table has no CF's it doesn't get checked - admin.createTable(desc); + createTable(TEST_UTIL, desc, null); tbl = (HTable) connection.getTable(table, tableExecutorService); // Mess it up by leaving a hole in the assignment, meta, and hdfs data @@ -2616,11 +2671,14 @@ public class TestHBaseFsck { HBaseFsck hbck = doFsck(conf, false); assertNoErrors(hbck); // no errors try { + hbck.connect(); // need connection to have access to META hbck.checkRegionBoundaries(); } catch (IllegalArgumentException e) { if (e.getMessage().endsWith("not a valid DFS filename.")) { fail("Table directory path is not valid." 
+ e.getMessage()); } + } finally { + hbck.close(); + } } @@ -2642,4 +2700,62 @@ Assert.assertEquals("shouldIgnorePreCheckPermission", true, hbck.shouldIgnorePreCheckPermission()); } + + public static class MasterSyncObserver extends BaseMasterObserver { + volatile CountDownLatch tableCreationLatch = null; + volatile CountDownLatch tableDeletionLatch = null; + + @Override + public void postCreateTableHandler(final ObserverContext ctx, + HTableDescriptor desc, HRegionInfo[] regions) throws IOException { + // the AccessController test sometimes calls only the postCreateTableHandler() directly + if (tableCreationLatch != null) { + tableCreationLatch.countDown(); + } + } + + @Override + public void postDeleteTableHandler(final ObserverContext ctx, + TableName tableName) + throws IOException { + // the AccessController test sometimes calls only the postDeleteTableHandler() directly + if (tableDeletionLatch != null) { + tableDeletionLatch.countDown(); + } + } + } + + public static void createTable(HBaseTestingUtility testUtil, HTableDescriptor htd, + byte [][] splitKeys) throws Exception { + // NOTE: We need a latch because admin is not synchronous, + // so the postOp coprocessor method may be called after the admin operation returned. + MasterSyncObserver observer = (MasterSyncObserver)testUtil.getHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName()); + observer.tableCreationLatch = new CountDownLatch(1); + if (splitKeys != null) { + admin.createTable(htd, splitKeys); + } else { + admin.createTable(htd); + } + observer.tableCreationLatch.await(); + observer.tableCreationLatch = null; + testUtil.waitUntilAllRegionsAssigned(htd.getTableName()); + } + + public static void deleteTable(HBaseTestingUtility testUtil, TableName tableName) + throws Exception { + // NOTE: We need a latch because admin is not synchronous, + // so the postOp coprocessor method may be called after the admin operation returned. 
+ MasterSyncObserver observer = (MasterSyncObserver)testUtil.getHBaseCluster().getMaster() + .getMasterCoprocessorHost().findCoprocessor(MasterSyncObserver.class.getName()); + observer.tableDeletionLatch = new CountDownLatch(1); + try { + admin.disableTable(tableName); + } catch (Exception e) { + LOG.debug("Table: " + tableName + " already disabled, so just deleting it."); + } + admin.deleteTable(tableName); + observer.tableDeletionLatch.await(); + observer.tableDeletionLatch = null; + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java index 69ffa5593ee..5c08f1fe389 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.io.crypto.Encryption; @@ -43,7 +42,7 @@ import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting; import org.apache.hadoop.hbase.io.crypto.aes.AES; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.security.EncryptionUtil; @@ -52,7 +51,6 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil; - import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -78,7 +76,9 @@ public class TestHBaseFsckEncryption { SecureRandom rng = new SecureRandom(); byte[] keyBytes = new byte[AES.KEY_LENGTH]; rng.nextBytes(keyBytes); - cfKey = new SecretKeySpec(keyBytes, "AES"); + String algorithm = + conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + cfKey = new SecretKeySpec(keyBytes,algorithm); // Start the minicluster TEST_UTIL.startMiniCluster(3); @@ -86,7 +86,7 @@ public class TestHBaseFsckEncryption { // Create the table htd = new HTableDescriptor(TableName.valueOf("default", "TestHBaseFsckEncryption")); HColumnDescriptor hcd = new HColumnDescriptor("cf"); - hcd.setEncryptionType("AES"); + hcd.setEncryptionType(algorithm); hcd.setEncryptionKey(EncryptionUtil.wrapKey(conf, conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()), cfKey)); @@ -140,9 +140,9 @@ public class TestHBaseFsckEncryption { private List findStorefilePaths(TableName tableName) throws Exception { List paths = new ArrayList(); - for (HRegion region: + for (Region region: TEST_UTIL.getRSForFirstRegionInTable(tableName).getOnlineRegions(htd.getTableName())) { - for (Store store: region.getStores().values()) { + for (Store store: region.getStores()) { for (StoreFile storefile: store.getStorefiles()) { paths.add(storefile.getPath()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java index 6a86580ca80..e31cb4e64da 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.junit.Test; @@ -142,7 +143,7 @@ public class TestMergeTable { HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey); HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, rootdir, UTIL.getConfiguration(), desc); - LOG.info("Created region " + region.getRegionNameAsString()); + LOG.info("Created region " + region.getRegionInfo().getRegionNameAsString()); for(int i = firstRow; i < firstRow + nrows; i++) { Put put = new Put(Bytes.toBytes("row_" + String.format("%1$05d", i))); put.setDurability(Durability.SKIP_WAL); @@ -150,7 +151,7 @@ public class TestMergeTable { region.put(put); if (i % 10000 == 0) { LOG.info("Flushing write #" + i); - region.flushcache(); + region.flush(true); } } HBaseTestingUtility.closeRegionAndWAL(region); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java index b04e5de6dac..056eeb2fee7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java @@ -40,10 +40,10 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; -import org.apache.hadoop.hbase.wal.WAL; -import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.wal.WAL; +import org.apache.hadoop.hbase.wal.WALFactory; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.util.ToolRunner; import org.junit.After; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestTableName.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestTableName.java index 94070f3f8bc..f585f47f9f6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestTableName.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestTableName.java @@ -23,7 +23,6 @@ import java.util.Map; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertSame; import static org.junit.Assert.fail; @@ -53,8 +52,8 @@ public class TestTableName extends TestWatcher { public TableName getTableName() { return tableName; } - - String emptyTableNames[] ={"", " "}; + + String emptyNames[] ={"", " "}; String invalidNamespace[] = {":a", "%:a"}; String legalTableNames[] = { "foo", "with-dash_under.dot", "_under_start_ok", "with-dash.with_underscore", "02-01-2012.my_table_01-02", "xyz._mytable_", "9_9_0.table_02" @@ -73,9 +72,17 @@ public class TestTableName extends TestWatcher { 
} } + @Test(expected = IllegalArgumentException.class) + public void testEmptyNamespaceName() { + for (String nn : emptyNames) { + TableName.isLegalNamespaceName(Bytes.toBytes(nn)); + fail("invalid Namespace name " + nn + " should have failed with IllegalArgumentException"); + } + } + @Test(expected = IllegalArgumentException.class) public void testEmptyTableName() { - for (String tn : emptyTableNames) { + for (String tn : emptyNames) { TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(tn)); fail("invalid tablename " + tn + " should have failed with IllegalArgumentException"); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java index 217f60b88d8..3691cd73a44 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java @@ -49,24 +49,27 @@ public class HbckTestingUtil { boolean fixReferenceFiles, boolean fixEmptyMetaRegionInfo, boolean fixTableLocks, TableName table) throws Exception { HBaseFsck fsck = new HBaseFsck(conf, exec); - fsck.connect(); - fsck.setDisplayFullReport(); // i.e. -details - fsck.setTimeLag(0); - fsck.setFixAssignments(fixAssignments); - fsck.setFixMeta(fixMeta); - fsck.setFixHdfsHoles(fixHdfsHoles); - fsck.setFixHdfsOverlaps(fixHdfsOverlaps); - fsck.setFixHdfsOrphans(fixHdfsOrphans); - fsck.setFixTableOrphans(fixTableOrphans); - fsck.setFixVersionFile(fixVersionFile); - fsck.setFixReferenceFiles(fixReferenceFiles); - fsck.setFixEmptyMetaCells(fixEmptyMetaRegionInfo); - fsck.setFixTableLocks(fixTableLocks); - if (table != null) { - fsck.includeTable(table); + try { + fsck.connect(); + HBaseFsck.setDisplayFullReport(); // i.e. 
-details + fsck.setTimeLag(0); + fsck.setFixAssignments(fixAssignments); + fsck.setFixMeta(fixMeta); + fsck.setFixHdfsHoles(fixHdfsHoles); + fsck.setFixHdfsOverlaps(fixHdfsOverlaps); + fsck.setFixHdfsOrphans(fixHdfsOrphans); + fsck.setFixTableOrphans(fixTableOrphans); + fsck.setFixVersionFile(fixVersionFile); + fsck.setFixReferenceFiles(fixReferenceFiles); + fsck.setFixEmptyMetaCells(fixEmptyMetaRegionInfo); + fsck.setFixTableLocks(fixTableLocks); + if (table != null) { + fsck.includeTable(table); + } + fsck.onlineHbck(); + } finally { + fsck.close(); } - fsck.onlineHbck(); - fsck.close(); return fsck; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java index fc222924567..d4f86e985ff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildBase.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.HBaseFsck; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFiltering.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFiltering.java index 66868a10a10..8833edac05a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFiltering.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFiltering.java @@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.util.Bytes; import org.junit.After; import org.junit.Before; @@ -130,8 +130,8 @@ public class TestWALFiltering { private List getRegionsByServer(int rsId) throws IOException { List regionNames = Lists.newArrayList(); HRegionServer hrs = getRegionServer(rsId); - for (HRegion r : hrs.getOnlineRegions(TABLE_NAME)) { - regionNames.add(r.getRegionName()); + for (Region r : hrs.getOnlineRegions(TABLE_NAME)) { + regionNames.add(r.getRegionInfo().getRegionName()); } return regionNames; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestHQuorumPeer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestHQuorumPeer.java index 793cc1f5aa0..a8a61ad0c29 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestHQuorumPeer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestHQuorumPeer.java @@ -141,5 +141,14 @@ public class TestHQuorumPeer { assertEquals(2181, p.get("clientPort")); } + @Test + public void testGetZKQuorumServersString() { + Configuration config = new Configuration(TEST_UTIL.getConfiguration()); + config.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 8888); + 
config.set(HConstants.ZOOKEEPER_QUORUM, "foo:1234,bar:5678,baz,qux:9012"); + + String s = ZKConfig.getZKQuorumServersString(config); + assertEquals("foo:1234,bar:5678,baz:8888,qux:9012", s); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZooKeeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZooKeeper.java index e83ac7443ba..b83f9137e49 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZooKeeper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestRecoverableZooKeeper.java @@ -72,10 +72,9 @@ public class TestRecoverableZooKeeper { public void testSetDataVersionMismatchInLoop() throws Exception { String znode = "/hbase/splitWAL/9af7cfc9b15910a0b3d714bf40a3248f"; Configuration conf = TEST_UTIL.getConfiguration(); - Properties properties = ZKConfig.makeZKProps(conf); ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testSetDataVersionMismatchInLoop", abortable, true); - String ensemble = ZKConfig.getZKQuorumServersString(properties); + String ensemble = ZKConfig.getZKQuorumServersString(conf); RecoverableZooKeeper rzk = ZKUtil.connect(conf, ensemble, zkw); rzk.create(znode, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); rzk.setData(znode, "OPENING".getBytes(), 0); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperMainServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperMainServer.java index 1928b184299..4e9931f9496 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperMainServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperMainServer.java @@ -104,5 +104,15 @@ public class TestZooKeeperMainServer { c.set("hbase.zookeeper.quorum", "example1.com,example2.com,example3.com"); String ensemble = parser.parse(c); assertTrue(port, ensemble.matches("(example[1-3]\\.com:1234,){2}example[1-3]\\.com:" + port)); + + // multiple servers with its own port + c.set("hbase.zookeeper.quorum", "example1.com:5678,example2.com:9012,example3.com:3456"); + ensemble = parser.parse(c); + assertEquals(ensemble, "example1.com:5678,example2.com:9012,example3.com:3456"); + + // some servers without its own port, which will be assigned the default client port + c.set("hbase.zookeeper.quorum", "example1.com:5678,example2.com:9012,example3.com"); + ensemble = parser.parse(c); + assertEquals(ensemble, "example1.com:5678,example2.com:9012,example3.com:" + port); } } \ No newline at end of file diff --git a/hbase-server/src/test/resources/log4j.properties b/hbase-server/src/test/resources/log4j.properties index 6ee91efc3b2..13a95b4a673 100644 --- a/hbase-server/src/test/resources/log4j.properties +++ b/hbase-server/src/test/resources/log4j.properties @@ -63,4 +63,4 @@ log4j.logger.org.apache.hadoop.hbase=DEBUG log4j.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=ERROR log4j.org.apache.hadoop.metrics2.util.MBeans=ERROR # Enable this to get detailed connection error/retry logging. 
-# log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE +# log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=TRACE diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml index bdba00c4a12..f4d54f57182 100644 --- a/hbase-shell/pom.xml +++ b/hbase-shell/pom.xml @@ -50,27 +50,6 @@ - - maven-compiler-plugin - - - default-compile - - ${java.default.compiler} - true - false - - - - default-testCompile - - ${java.default.compiler} - true - false - - - - org.apache.maven.plugins maven-site-plugin @@ -170,7 +149,46 @@ ${surefire.firstPartGroups} - + + + org.eclipse.m2e + lifecycle-mapping + 1.0.0 + + + + + + org.apache.maven.plugins + maven-dependency-plugin + [2.8,) + + build-classpath + + + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + [3.2,) + + compile + + + + + + + + + + + diff --git a/hbase-shell/src/main/asciidoc/.gitignore b/hbase-shell/src/main/asciidoc/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hbase-shell/src/main/ruby/hbase.rb b/hbase-shell/src/main/ruby/hbase.rb index a857bd961aa..f181edabded 100644 --- a/hbase-shell/src/main/ruby/hbase.rb +++ b/hbase-shell/src/main/ruby/hbase.rb @@ -72,6 +72,11 @@ module HBaseConstants TYPE = 'TYPE' NONE = 'NONE' VALUE = 'VALUE' + ENDPOINT_CLASSNAME = 'ENDPOINT_CLASSNAME' + CLUSTER_KEY = 'CLUSTER_KEY' + TABLE_CFS = 'TABLE_CFS' + CONFIG = 'CONFIG' + DATA = 'DATA' # Load constants from hbase java API def self.promote_constants(constants) diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 51571c660e7..47b77a0ac89 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -34,7 +34,7 @@ module Hbase def initialize(admin, formatter) @admin = admin - connection = @admin.getConnection() + @connection = @admin.getConnection() @formatter = formatter end @@ -84,7 +84,7 @@ module Hbase #---------------------------------------------------------------------------------------------- # Requests a regionserver's WAL roll def wal_roll(server_name) - @admin.rollWALWriter(server_name) + @admin.rollWALWriter(ServerName.valueOf(server_name)) end # TODO remove older hlog_roll version alias :hlog_roll :wal_roll @@ -114,6 +114,13 @@ module Hbase java.lang.Boolean::valueOf(enableDisable), java.lang.Boolean::valueOf(false)) end + #---------------------------------------------------------------------------------------------- + # Query the current state of the LoadBalancer. + # Returns the balancer's state (true is enabled). + def balancer_enabled?() + @admin.isBalancerEnabled() + end + #---------------------------------------------------------------------------------------------- # Request a scan of the catalog table (for garbage collection) # Returns an int signifying the number of entries cleaned @@ -168,7 +175,7 @@ module Hbase #--------------------------------------------------------------------------------------------- # Throw exception if table doesn't exist def tableExists(table_name) - raise ArgumentError, "Table #{table_name} does not exist.'" unless exists?(table_name) + raise ArgumentError, "Table #{table_name} does not exist." unless exists?(table_name) end #---------------------------------------------------------------------------------------------- @@ -181,7 +188,7 @@ module Hbase # Drops a table def drop(table_name) tableExists(table_name) - raise ArgumentError, "Table #{table_name} is enabled. 
Disable it first.'" if enabled?(table_name) + raise ArgumentError, "Table #{table_name} is enabled. Disable it first." if enabled?(table_name) @admin.deleteTable(org.apache.hadoop.hbase.TableName.valueOf(table_name)) end @@ -372,7 +379,7 @@ module Hbase # Truncates table (deletes all records by recreating the table) def truncate(table_name, conf = @conf) table_description = @admin.getTableDescriptor(TableName.valueOf(table_name)) - raise ArgumentError, "Table #{table_name} is not enabled. Enable it first.'" unless enabled?(table_name) + raise ArgumentError, "Table #{table_name} is not enabled. Enable it first." unless enabled?(table_name) yield 'Disabling table...' if block_given? @admin.disableTable(table_name) @@ -399,14 +406,14 @@ module Hbase #---------------------------------------------------------------------------------------------- # Truncates table while maintaing region boundaries (deletes all records by recreating the table) def truncate_preserve(table_name, conf = @conf) - h_table = @conn.getTable(table_name) - locator = @conn.getRegionLocator(table_name) + h_table = @connection.getTable(TableName.valueOf(table_name)) + locator = @connection.getRegionLocator(TableName.valueOf(table_name)) splits = locator.getAllRegionLocations(). map{|i| Bytes.toString(i.getRegionInfo().getStartKey)}. delete_if{|k| k == ""}.to_java :String locator.close() - table_description = @admin.getTableDescriptor(table_name) + table_description = @admin.getTableDescriptor(TableName.valueOf(table_name)) yield 'Disabling table...' if block_given? disable(table_name) @@ -608,7 +615,7 @@ module Hbase end end - def status(format) + def status(format, type) status = @admin.getClusterStatus() if format == "detailed" puts("version %s" % [ status.getHBaseVersion() ]) @@ -635,6 +642,46 @@ module Hbase for server in status.getDeadServerNames() puts(" %s" % [ server ]) end + elsif format == "replication" + #check whether replication is enabled or not + if (!@admin.getConfiguration().getBoolean(org.apache.hadoop.hbase.HConstants::REPLICATION_ENABLE_KEY, + org.apache.hadoop.hbase.HConstants::REPLICATION_ENABLE_DEFAULT)) + puts("Please enable replication first.") + else + puts("version %s" % [ status.getHBaseVersion() ]) + puts("%d live servers" % [ status.getServersSize() ]) + for server in status.getServers() + sl = status.getLoad(server) + rSinkString = " SINK :" + rSourceString = " SOURCE:" + rLoadSink = sl.getReplicationLoadSink() + rSinkString << " AgeOfLastAppliedOp=" + rLoadSink.getAgeOfLastAppliedOp().to_s + rSinkString << ", TimeStampsOfLastAppliedOp=" + + (java.util.Date.new(rLoadSink.getTimeStampsOfLastAppliedOp())).toString() + rLoadSourceList = sl.getReplicationLoadSourceList() + index = 0 + while index < rLoadSourceList.size() + rLoadSource = rLoadSourceList.get(index) + rSourceString << " PeerID=" + rLoadSource.getPeerID() + rSourceString << ", AgeOfLastShippedOp=" + rLoadSource.getAgeOfLastShippedOp().to_s + rSourceString << ", SizeOfLogQueue=" + rLoadSource.getSizeOfLogQueue().to_s + rSourceString << ", TimeStampsOfLastShippedOp=" + + (java.util.Date.new(rLoadSource.getTimeStampOfLastShippedOp())).toString() + rSourceString << ", Replication Lag=" + rLoadSource.getReplicationLag().to_s + index = index + 1 + end + puts(" %s:" % + [ server.getHostname() ]) + if type.casecmp("SOURCE") == 0 + puts("%s" % rSourceString) + elsif type.casecmp("SINK") == 0 + puts("%s" % rSinkString) + else + puts("%s" % rSourceString) + puts("%s" % rSinkString) + end + end + end elsif format == "simple" load = 0 regions = 0 
@@ -746,7 +793,7 @@ module Hbase # Enables/disables a region by name def online(region_name, on_off) # Open meta table - meta = connection.getTable(org.apache.hadoop.hbase.TableName::META_TABLE_NAME) + meta = @connection.getTable(org.apache.hadoop.hbase.TableName::META_TABLE_NAME) # Read region info # FIXME: fail gracefully if can't find the region diff --git a/hbase-shell/src/main/ruby/hbase/hbase.rb b/hbase-shell/src/main/ruby/hbase/hbase.rb index 030c88cb7f6..135e1d59c3d 100644 --- a/hbase-shell/src/main/ruby/hbase/hbase.rb +++ b/hbase-shell/src/main/ruby/hbase/hbase.rb @@ -57,15 +57,15 @@ module Hbase end def security_admin(formatter) - ::Hbase::SecurityAdmin.new(configuration, formatter) + ::Hbase::SecurityAdmin.new(@connection.getAdmin, formatter) end def visibility_labels_admin(formatter) - ::Hbase::VisibilityLabelsAdmin.new(configuration, formatter) + ::Hbase::VisibilityLabelsAdmin.new(@connection.getAdmin, formatter) end def quotas_admin(formatter) - ::Hbase::QuotasAdmin.new(configuration, formatter) + ::Hbase::QuotasAdmin.new(@connection.getAdmin, formatter) end def shutdown diff --git a/hbase-shell/src/main/ruby/hbase/quotas.rb b/hbase-shell/src/main/ruby/hbase/quotas.rb index 758e2ec6560..fa076a5e854 100644 --- a/hbase-shell/src/main/ruby/hbase/quotas.rb +++ b/hbase-shell/src/main/ruby/hbase/quotas.rb @@ -34,13 +34,15 @@ end module Hbase class QuotasAdmin - def initialize(configuration, formatter) - @config = configuration - @connection = org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(configuration) - @admin = @connection.getAdmin() + def initialize(admin, formatter) + @admin = admin @formatter = formatter end + def close + @admin.close + end + def throttle(args) raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash) type = args.fetch(THROTTLE_TYPE, REQUEST) diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb b/hbase-shell/src/main/ruby/hbase/replication_admin.rb index 6dedb2effc7..b2ca8e1a6e9 100644 --- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb +++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb @@ -19,21 +19,81 @@ include Java -# Wrapper for org.apache.hadoop.hbase.client.HBaseAdmin +java_import org.apache.hadoop.hbase.client.replication.ReplicationAdmin +java_import org.apache.hadoop.hbase.replication.ReplicationPeerConfig +java_import org.apache.hadoop.hbase.util.Bytes +java_import org.apache.hadoop.hbase.zookeeper.ZKUtil +java_import org.apache.hadoop.hbase.TableName + +# Wrapper for org.apache.hadoop.hbase.client.replication.ReplicationAdmin module Hbase class RepAdmin include HBaseConstants def initialize(configuration, formatter) - @replication_admin = org.apache.hadoop.hbase.client.replication.ReplicationAdmin.new(configuration) + @replication_admin = ReplicationAdmin.new(configuration) + @configuration = configuration @formatter = formatter end #---------------------------------------------------------------------------------------------- # Add a new peer cluster to replicate to - def add_peer(id, cluster_key, peer_tableCFs = nil) - @replication_admin.addPeer(id, cluster_key, peer_tableCFs) + def add_peer(id, args = {}, peer_tableCFs = nil) + # make add_peer backwards compatible to take in string for clusterKey and peer_tableCFs + if args.is_a?(String) + cluster_key = args + @replication_admin.addPeer(id, cluster_key, peer_tableCFs) + elsif args.is_a?(Hash) + unless peer_tableCFs.nil? 
+ raise(ArgumentError, "peer_tableCFs should be specified as TABLE_CFS in args") + end + + endpoint_classname = args.fetch(ENDPOINT_CLASSNAME, nil) + cluster_key = args.fetch(CLUSTER_KEY, nil) + + # Handle cases where custom replication endpoint and cluster key are either both provided + # or neither are provided + if endpoint_classname.nil? and cluster_key.nil? + raise(ArgumentError, "Either ENDPOINT_CLASSNAME or CLUSTER_KEY must be specified.") + elsif !endpoint_classname.nil? and !cluster_key.nil? + raise(ArgumentError, "ENDPOINT_CLASSNAME and CLUSTER_KEY cannot both be specified.") + end + + # Cluster Key is required for ReplicationPeerConfig for a custom replication endpoint + if !endpoint_classname.nil? and cluster_key.nil? + cluster_key = ZKUtil.getZooKeeperClusterKey(@configuration) + end + + # Optional parameters + config = args.fetch(CONFIG, nil) + data = args.fetch(DATA, nil) + table_cfs = args.fetch(TABLE_CFS, nil) + + # Create and populate a ReplicationPeerConfig + replication_peer_config = ReplicationPeerConfig.new + replication_peer_config.set_cluster_key(cluster_key) + + unless endpoint_classname.nil? + replication_peer_config.set_replication_endpoint_impl(endpoint_classname) + end + + unless config.nil? + replication_peer_config.get_configuration.put_all(config) + end + + unless data.nil? + # Convert Strings to Bytes for peer_data + peer_data = replication_peer_config.get_peer_data + data.each{|key, val| + peer_data.put(Bytes.to_bytes(key), Bytes.to_bytes(val)) + } + end + + @replication_admin.add_peer(id, replication_peer_config, table_cfs) + else + raise(ArgumentError, "args must be either a String or Hash") + end end #---------------------------------------------------------------------------------------------- @@ -48,7 +108,7 @@ module Hbase def list_replicated_tables(regex = ".*") pattern = java.util.regex.Pattern.compile(regex) list = @replication_admin.listReplicated() - list.select {|s| pattern.match(s.get(org.apache.hadoop.hbase.client.replication.ReplicationAdmin::TNAME))} + list.select {|s| pattern.match(s.get(ReplicationAdmin.TNAME))} end #---------------------------------------------------------------------------------------------- @@ -98,5 +158,17 @@ module Hbase def remove_peer_tableCFs(id, tableCFs) @replication_admin.removePeerTableCFs(id, tableCFs) end + #---------------------------------------------------------------------------------------------- + # Enables a table's replication switch + def enable_tablerep(table_name) + tableName = TableName.valueOf(table_name) + @replication_admin.enableTableRep(tableName) + end + #---------------------------------------------------------------------------------------------- + # Disables a table's replication switch + def disable_tablerep(table_name) + tableName = TableName.valueOf(table_name) + @replication_admin.disableTableRep(tableName) + end end end diff --git a/hbase-shell/src/main/ruby/hbase/security.rb b/hbase-shell/src/main/ruby/hbase/security.rb index 5262f4508cb..2aaef02a92c 100644 --- a/hbase-shell/src/main/ruby/hbase/security.rb +++ b/hbase-shell/src/main/ruby/hbase/security.rb @@ -24,13 +24,16 @@ module Hbase class SecurityAdmin include HBaseConstants - def initialize(configuration, formatter) - @config = configuration - @connection = org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(@config) - @admin = @connection.getAdmin() + def initialize(admin, formatter) + @admin = admin + @connection = @admin.getConnection() @formatter = formatter end + def close + @admin.close + end + 
#---------------------------------------------------------------------------------------------- def grant(user, permissions, table_name=nil, family=nil, qualifier=nil) security_available? @@ -56,7 +59,7 @@ module Hbase namespace_exists?(namespace_name) org.apache.hadoop.hbase.security.access.AccessControlClient.grant( - @config, namespace_name, user, perm.getActions()) + @connection, namespace_name, user, perm.getActions()) else # Table should exist raise(ArgumentError, "Can't find a table: #{table_name}") unless exists?(table_name) @@ -72,12 +75,12 @@ module Hbase qualbytes = qualifier.to_java_bytes if (qualifier != nil) org.apache.hadoop.hbase.security.access.AccessControlClient.grant( - @config, tableName, user, fambytes, qualbytes, perm.getActions()) + @connection, tableName, user, fambytes, qualbytes, perm.getActions()) end else # invoke cp endpoint to perform access controls org.apache.hadoop.hbase.security.access.AccessControlClient.grant( - @config, user, perm.getActions()) + @connection, user, perm.getActions()) end end end @@ -98,7 +101,7 @@ module Hbase tablebytes=table_name.to_java_bytes org.apache.hadoop.hbase.security.access.AccessControlClient.revoke( - @config, namespace_name, user) + @connection, namespace_name, user) else # Table should exist raise(ArgumentError, "Can't find a table: #{table_name}") unless exists?(table_name) @@ -114,12 +117,12 @@ module Hbase qualbytes = qualifier.to_java_bytes if (qualifier != nil) org.apache.hadoop.hbase.security.access.AccessControlClient.revoke( - @config, tableName, user, fambytes, qualbytes) + @connection, tableName, user, fambytes, qualbytes) end else perm = org.apache.hadoop.hbase.security.access.Permission.new(''.to_java_bytes) org.apache.hadoop.hbase.security.access.AccessControlClient.revoke( - @config, user, perm.getActions()) + @connection, user, perm.getActions()) end end end @@ -127,7 +130,8 @@ module Hbase #---------------------------------------------------------------------------------------------- def user_permission(table_regex=nil) security_available? - all_perms = org.apache.hadoop.hbase.security.access.AccessControlClient.getUserPermissions(@config,table_regex) + all_perms = org.apache.hadoop.hbase.security.access.AccessControlClient.getUserPermissions( + @connection,table_regex) res = {} count = 0 all_perms.each do |value| diff --git a/hbase-shell/src/main/ruby/hbase/table.rb b/hbase-shell/src/main/ruby/hbase/table.rb index eaa2d5c989a..9a71fa5b182 100644 --- a/hbase-shell/src/main/ruby/hbase/table.rb +++ b/hbase-shell/src/main/ruby/hbase/table.rb @@ -49,7 +49,7 @@ module Hbase end end end - + # General help for the table # class level so we can call it from anywhere def self.help @@ -103,7 +103,7 @@ Note that after dropping a table, your reference to it becomes useless and furth is undefined (and not recommended). 
EOF end - + #--------------------------------------------------------------------------------------------- # let external objects read the underlying table object @@ -150,7 +150,7 @@ EOF end end timestamp = nil - end + end if timestamp p.add(family, qualifier, timestamp, value.to_s.to_java_bytes) else @@ -161,14 +161,14 @@ EOF #---------------------------------------------------------------------------------------------- # Delete a cell - def _delete_internal(row, column, + def _delete_internal(row, column, timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {}) _deleteall_internal(row, column, timestamp, args) end #---------------------------------------------------------------------------------------------- # Delete a row - def _deleteall_internal(row, column = nil, + def _deleteall_internal(row, column = nil, timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {}) # delete operation doesn't need read permission. Retaining the read check for # meta table as a part of HBASE-5837. @@ -185,7 +185,7 @@ EOF if v.kind_of?(String) set_cell_visibility(d, v) if v end - end + end end if args.any? visibility = args[VISIBILITY] @@ -219,9 +219,14 @@ EOF set_op_ttl(incr, ttl) if ttl end incr.addColumn(family, qualifier, value) - @table.increment(incr) + result = @table.increment(incr) + return nil if result.isEmpty + + # Fetch cell value + cell = result.listCells[0] + org.apache.hadoop.hbase.util.Bytes::toLong(cell.getValue) end - + #---------------------------------------------------------------------------------------------- # appends the value atomically def _append_internal(row, column, value, args={}) @@ -262,7 +267,7 @@ EOF count += 1 next unless (block_given? && count % interval == 0) # Allow command modules to visualize counting process - yield(count, + yield(count, org.apache.hadoop.hbase.util.Bytes::toStringBinary(row.getRow)) end @@ -276,7 +281,7 @@ EOF get = org.apache.hadoop.hbase.client.Get.new(row.to_s.to_java_bytes) maxlength = -1 @converters.clear() - + # Normalize args args = args.first if args.first.kind_of?(Hash) if args.kind_of?(String) || args.kind_of?(Array) @@ -433,7 +438,7 @@ EOF # This will overwrite any startrow/stoprow settings scan.setRowPrefixFilter(rowprefixfilter.to_java_bytes) if rowprefixfilter - columns.each do |c| + columns.each do |c| family, qualifier = parse_column_name(c.to_s) if qualifier scan.addColumn(family, qualifier) @@ -645,7 +650,7 @@ EOF end (maxlength != -1) ? val[0, maxlength] : val end - + def convert(column, kv) #use org.apache.hadoop.hbase.util.Bytes as the default class klazz_name = 'org.apache.hadoop.hbase.util.Bytes' @@ -657,7 +662,7 @@ EOF if matches.nil? # cannot match the pattern of 'c(className).functionname' # use the default klazz_name - converter = @converters[column] + converter = @converters[column] else klazz_name = matches[1] converter = matches[2] @@ -666,7 +671,7 @@ EOF method = eval(klazz_name).method(converter) return method.call(kv.getValue) # apply the converter end - + # if the column spec contains CONVERTER information, to get rid of :CONVERTER info from column pair. # 1. return back normal column pair as usual, i.e., "cf:qualifier[:CONVERTER]" to "cf" and "qualifier" only # 2. 
register the CONVERTER information based on column spec - "cf:qualifier" @@ -678,5 +683,16 @@ EOF column[1] = parts[0] end end + + #---------------------------------------------------------------------------------------------- + # Get the split points for the table + def _get_splits_internal() + locator = @table.getRegionLocator() + splits = locator.getAllRegionLocations(). + map{|i| Bytes.toStringBinary(i.getRegionInfo().getStartKey)}.delete_if{|k| k == ""} + locator.close() + puts("Total number of splits = %s" % [splits.size + 1]) + return splits + end end end diff --git a/hbase-shell/src/main/ruby/hbase/visibility_labels.rb b/hbase-shell/src/main/ruby/hbase/visibility_labels.rb index 61a49e81332..a3f8b1a3364 100644 --- a/hbase-shell/src/main/ruby/hbase/visibility_labels.rb +++ b/hbase-shell/src/main/ruby/hbase/visibility_labels.rb @@ -24,13 +24,14 @@ java_import org.apache.hadoop.hbase.util.Bytes module Hbase class VisibilityLabelsAdmin - def initialize(configuration, formatter) - @config = configuration + def initialize(admin, formatter) + @admin = admin + @config = @admin.getConfiguration() @formatter = formatter - - # @connection = org.apache.hadoop.hbase.client.ConnectionFactory(configuration) - # @admin = @connection.getAdmin() - @admin = org.apache.hadoop.hbase.client.HBaseAdmin.new(configuration) + end + + def close + @admin.close end def add_labels(*args) diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb index 8ec4da068ce..3d56c2e1440 100644 --- a/hbase-shell/src/main/ruby/shell.rb +++ b/hbase-shell/src/main/ruby/shell.rb @@ -301,6 +301,7 @@ Shell.load_command_group( truncate truncate_preserve append + get_splits ] ) @@ -312,6 +313,7 @@ Shell.load_command_group( assign balancer balance_switch + balancer_enabled close_region compact flush @@ -351,6 +353,8 @@ Shell.load_command_group( list_replicated_tables append_peer_tableCFs remove_peer_tableCFs + enable_table_replication + disable_table_replication ] ) diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb index 2128164ed04..2abccddf045 100644 --- a/hbase-shell/src/main/ruby/shell/commands.rb +++ b/hbase-shell/src/main/ruby/shell/commands.rb @@ -102,9 +102,13 @@ module Shell # Get the special java exception which will be handled cause = e.cause + # let individual command handle exceptions first + if self.respond_to?(:handle_exceptions) + self.handle_exceptions(cause, *args) + end + # Global HBase exception handling below if not handled by respective command above if cause.kind_of?(org.apache.hadoop.hbase.TableNotFoundException) then - str = java.lang.String.new("#{cause}") - raise "Unknown table #{str}!" + raise "Unknown table #{args.first}!" end if cause.kind_of?(org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException) then exceptions = cause.getCauses @@ -116,15 +120,7 @@ module Shell end end if cause.kind_of?(org.apache.hadoop.hbase.TableExistsException) then - str = java.lang.String.new("#{cause}") - strs = str.split("\n") - if strs.size > 0 then - s = strs[0].split(' '); - if(s.size > 1) - raise "Table already exists: #{s[1]}!" - end - raise "Table already exists: #{strs[0]}!" - end + raise "Table already exists: #{args.first}!" end # To be safe, here only AccessDeniedException is considered. In future # we might support more in more generic approach when possible. 
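Editor's note: the shell/commands.rb hunk above lets an individual command inspect the unwrapped Java cause before the generic TableNotFoundException/TableExistsException handling runs, via the respond_to?(:handle_exceptions) dispatch shown there; the clone_snapshot change further below is the in-tree example. The following is only a minimal sketch of a command opting into that hook, assuming the usual shell environment is loaded; "ExampleCommand" and "admin.example_operation" are illustrative placeholders, not part of the patch.

  module Shell
    module Commands
      class ExampleCommand < Command
        def command(table_name)
          format_simple_command do
            admin.example_operation(table_name) # hypothetical admin call, for illustration only
          end
        end

        # Invoked by commands.rb with the unwrapped Java cause and the original
        # shell arguments, before the global exception handling kicks in.
        def handle_exceptions(cause, *args)
          if cause.kind_of?(org.apache.hadoop.hbase.TableNotFoundException)
            raise "Unknown table #{args.first}!"
          end
        end
      end
    end
  end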
diff --git a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb index ecd8e753920..be010416445 100644 --- a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb +++ b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb @@ -22,21 +22,47 @@ module Shell class AddPeer< Command def help return <<-EOF -Add a peer cluster to replicate to, the id must be a short and -the cluster key is composed like this: +A peer can either be another HBase cluster or a custom replication endpoint. In either case an id +must be specified to identify the peer. + +For an HBase cluster peer, a cluster key must be provided and is composed like this: hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent -This gives a full path for HBase to connect to another cluster. +This gives a full path for HBase to connect to another HBase cluster. An optional parameter for +table column families identifies which column families will be replicated to the peer cluster. Examples: hbase> add_peer '1', "server1.cie.com:2181:/hbase" hbase> add_peer '2', "zk1,zk2,zk3:2182:/hbase-prod" - hbase> add_peer '3', "zk4,zk5,zk6:11000:/hbase-test", "tab1; tab2:cf1; tab3:cf2,cf3" + hbase> add_peer '3', "zk4,zk5,zk6:11000:/hbase-test", "table1; table2:cf1; table3:cf1,cf2" + hbase> add_peer '4', CLUSTER_KEY => "server1.cie.com:2181:/hbase" + hbase> add_peer '5', CLUSTER_KEY => "server1.cie.com:2181:/hbase", + TABLE_CFS => { "table1" => [], "table2" => ["cf1"], "table3" => ["cf1", "cf2"] } + +For a custom replication endpoint, the ENDPOINT_CLASSNAME can be provided. Two optional arguments +are DATA and CONFIG, which can be specified to set the peer_data or configuration +for the custom replication endpoint. The table column families argument is optional and can be specified with +the key TABLE_CFS. + + hbase> add_peer '6', ENDPOINT_CLASSNAME => 'org.apache.hadoop.hbase.MyReplicationEndpoint' + hbase> add_peer '7', ENDPOINT_CLASSNAME => 'org.apache.hadoop.hbase.MyReplicationEndpoint', + DATA => { "key1" => 1 } + hbase> add_peer '8', ENDPOINT_CLASSNAME => 'org.apache.hadoop.hbase.MyReplicationEndpoint', + CONFIG => { "config1" => "value1", "config2" => "value2" } + hbase> add_peer '9', ENDPOINT_CLASSNAME => 'org.apache.hadoop.hbase.MyReplicationEndpoint', + DATA => { "key1" => 1 }, CONFIG => { "config1" => "value1", "config2" => "value2" } + hbase> add_peer '10', ENDPOINT_CLASSNAME => 'org.apache.hadoop.hbase.MyReplicationEndpoint', + TABLE_CFS => { "table1" => [], "table2" => ["cf1"], "table3" => ["cf1", "cf2"] } + hbase> add_peer '11', ENDPOINT_CLASSNAME => 'org.apache.hadoop.hbase.MyReplicationEndpoint', + DATA => { "key1" => 1 }, CONFIG => { "config1" => "value1", "config2" => "value2" }, + TABLE_CFS => { "table1" => [], "table2" => ["cf1"], "table3" => ["cf1", "cf2"] } + +Note: Either CLUSTER_KEY or ENDPOINT_CLASSNAME must be specified but not both.
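Editor's note: the "either CLUSTER_KEY or ENDPOINT_CLASSNAME, but not both" rule above is easiest to see as code. The snippet below is only a standalone sketch of that validation, not the shipped replication_admin.rb; the helper name is made up, the two constants simply stand in for the hash keys used in the examples, and the behaviour mirrors the checks exercised by the new replication_admin_test.rb later in this patch.

  # Standalone sketch of the add_peer argument rules (illustrative only).
  CLUSTER_KEY = 'CLUSTER_KEY'
  ENDPOINT_CLASSNAME = 'ENDPOINT_CLASSNAME'

  def classify_add_peer_args(args, peer_tableCFs = nil)
    case args
    when String, Integer
      :legacy_cluster_key                  # old form: second argument is the cluster key
    when Hash
      unless peer_tableCFs.nil?
        raise ArgumentError, 'when args is a hash, give table CFs via TABLE_CFS inside it'
      end
      has_cluster_key = args.key?(CLUSTER_KEY)
      has_endpoint    = args.key?(ENDPOINT_CLASSNAME)
      if has_cluster_key == has_endpoint
        raise ArgumentError, 'specify either CLUSTER_KEY or ENDPOINT_CLASSNAME, but not both'
      end
      has_cluster_key ? :hbase_cluster_peer : :custom_endpoint_peer
    else
      raise ArgumentError, 'args must be a cluster key string or an options hash'
    end
  end

  # e.g. classify_add_peer_args(CLUSTER_KEY => 'zk1,zk2,zk3:2182:/hbase-prod') #=> :hbase_cluster_peer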
EOF end - def command(id, cluster_key, peer_tableCFs = nil) + def command(id, args = {}, peer_tableCFs = nil) format_simple_command do - replication_admin.add_peer(id, cluster_key, peer_tableCFs) + replication_admin.add_peer(id, args, peer_tableCFs) end end end diff --git a/hbase-shell/src/main/ruby/shell/commands/balancer_enabled.rb b/hbase-shell/src/main/ruby/shell/commands/balancer_enabled.rb new file mode 100644 index 00000000000..3b2f5c64fe5 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/balancer_enabled.rb @@ -0,0 +1,41 @@ +#!/usr/bin/env hbase-jruby +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Prints the current balancer status + +module Shell + module Commands + class BalancerEnabled < Command + def help + return <<-EOF +Query the balancer's state. +Examples: + + hbase> balancer_enabled +EOF + end + + def command() + format_simple_command do + formatter.row([ + admin.balancer_enabled?.to_s + ]) + end + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/clear_auths.rb b/hbase-shell/src/main/ruby/shell/commands/clear_auths.rb index 7bf4252dc75..8553fa64713 100644 --- a/hbase-shell/src/main/ruby/shell/commands/clear_auths.rb +++ b/hbase-shell/src/main/ruby/shell/commands/clear_auths.rb @@ -20,12 +20,13 @@ module Shell class ClearAuths < Command def help return <<-EOF -Add a set of visibility labels for an user that has to removed -Syntax : clear_auths 'user1',[label1, label2] +Clear visibility labels from a user or group +Syntax : clear_auths 'user',[label1, label2] For example: hbase> clear_auths 'user1', ['SECRET','PRIVATE'] + hbase> clear_auths '@group1', ['SECRET','PRIVATE'] EOF end diff --git a/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb b/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb index 8c193bb7c10..0498c8e3e4f 100644 --- a/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb +++ b/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb @@ -36,6 +36,13 @@ EOF admin.clone_snapshot(snapshot_name, table) end end + + def handle_exceptions(cause, *args) + if cause.kind_of?(org.apache.hadoop.hbase.TableExistsException) then + tableName = args[1] + raise "Table already exists: #{tableName}!" + end + end end end end diff --git a/hbase-shell/src/main/ruby/shell/commands/disable_table_replication.rb b/hbase-shell/src/main/ruby/shell/commands/disable_table_replication.rb new file mode 100644 index 00000000000..4c46feac3cd --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/disable_table_replication.rb @@ -0,0 +1,42 @@ +# +# Copyright 2010 The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class DisableTableReplication< Command + def help + return <<-EOF +Disable a table's replication switch. + +Examples: + + hbase> disable_table_replication 'table_name' +EOF + end + + def command(table_name) + format_simple_command do + replication_admin.disable_tablerep(table_name) + end + puts "The replication switch of table '#{table_name}' was successfully disabled" + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/enable_table_replication.rb b/hbase-shell/src/main/ruby/shell/commands/enable_table_replication.rb new file mode 100644 index 00000000000..5d57f03fb54 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/enable_table_replication.rb @@ -0,0 +1,42 @@ +# +# Copyright 2010 The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class EnableTableReplication< Command + def help + return <<-EOF +Enable a table's replication switch.
+ +Examples: + + hbase> enable_table_replication 'table_name' +EOF + end + + def command(table_name) + format_simple_command do + replication_admin.enable_tablerep(table_name) + end + puts "The replication switch of table '#{table_name}' was successfully enabled" + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/get_auths.rb b/hbase-shell/src/main/ruby/shell/commands/get_auths.rb index 2bc3e09a61a..1b758ef9495 100644 --- a/hbase-shell/src/main/ruby/shell/commands/get_auths.rb +++ b/hbase-shell/src/main/ruby/shell/commands/get_auths.rb @@ -20,12 +20,13 @@ module Shell class GetAuths < Command def help return <<-EOF -Get the visibility labels set for a particular user -Syntax : get_auths 'user1' +Get the visibility labels set for a particular user or group +Syntax : get_auths 'user' For example: hbase> get_auths 'user1' + hbase> get_auths '@group1' EOF end diff --git a/hbase-shell/src/main/ruby/shell/commands/get_counter.rb b/hbase-shell/src/main/ruby/shell/commands/get_counter.rb index 00cf64ddfb7..6708c6a5d31 100644 --- a/hbase-shell/src/main/ruby/shell/commands/get_counter.rb +++ b/hbase-shell/src/main/ruby/shell/commands/get_counter.rb @@ -23,8 +23,8 @@ module Shell def help return <<-EOF Return a counter cell value at specified table/row/column coordinates. -A cell cell should be managed with atomic increment function oh HBase -and the data should be binary encoded. Example: +A counter cell should be managed with the atomic increment functions of HBase, +and the data should be binary encoded (as a long value). Example: hbase> get_counter 'ns1:t1', 'r1', 'c1' hbase> get_counter 't1', 'r1', 'c1' @@ -36,11 +36,11 @@ t to table 't1', the corresponding command would be: EOF end - def command(table, row, column, value) - get_counter(table(table), row, column, value) + def command(table, row, column) + get_counter(table(table), row, column) end - def get_counter(table, row, column, value = nil) + def get_counter(table, row, column) if cnt = table._get_counter_internal(row, column) puts "COUNTER VALUE = #{cnt}" else diff --git a/hbase-shell/src/main/ruby/shell/commands/get_splits.rb b/hbase-shell/src/main/ruby/shell/commands/get_splits.rb new file mode 100644 index 00000000000..8b6ae825834 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/get_splits.rb @@ -0,0 +1,46 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class GetSplits < Command + def help + return <<-EOF +Get the splits of the named table: + hbase> get_splits 't1' + hbase> get_splits 'ns1:t1' + +The same commands can also be run on a table reference.
Suppose you had a reference +t to table 't1', the corresponding command would be: + + hbase> t.get_splits +EOF + end + + def command(table) + get_splits(table(table)) + end + + def get_splits(table) + table._get_splits_internal() + end + end + end +end + +::Hbase::Table.add_shell_command("get_splits") \ No newline at end of file diff --git a/hbase-shell/src/main/ruby/shell/commands/incr.rb b/hbase-shell/src/main/ruby/shell/commands/incr.rb index a59869c3847..d223a45aafe 100644 --- a/hbase-shell/src/main/ruby/shell/commands/incr.rb +++ b/hbase-shell/src/main/ruby/shell/commands/incr.rb @@ -50,7 +50,11 @@ EOF def incr(table, row, column, value = nil, args={}) format_simple_command do - table._incr_internal(row, column, value, args) + if cnt = table._incr_internal(row, column, value, args) + puts "COUNTER VALUE = #{cnt}" + else + puts "No counter found at specified coordinates" + end end end end diff --git a/hbase-shell/src/main/ruby/shell/commands/set_auths.rb b/hbase-shell/src/main/ruby/shell/commands/set_auths.rb index 66797190eb2..4a52eb0ef7d 100644 --- a/hbase-shell/src/main/ruby/shell/commands/set_auths.rb +++ b/hbase-shell/src/main/ruby/shell/commands/set_auths.rb @@ -20,12 +20,13 @@ module Shell class SetAuths < Command def help return <<-EOF -Add a set of visibility labels for an user -Syntax : set_auths 'user1',[label1, label2] +Add a set of visibility labels for a user or group +Syntax : set_auths 'user',[label1, label2] For example: hbase> set_auths 'user1', ['SECRET','PRIVATE'] + hbase> set_auths '@group1', ['SECRET','PRIVATE'] EOF end diff --git a/hbase-shell/src/main/ruby/shell/commands/status.rb b/hbase-shell/src/main/ruby/shell/commands/status.rb index f72c13caef6..b22b2723987 100644 --- a/hbase-shell/src/main/ruby/shell/commands/status.rb +++ b/hbase-shell/src/main/ruby/shell/commands/status.rb @@ -22,18 +22,21 @@ module Shell class Status < Command def help return <<-EOF -Show cluster status. Can be 'summary', 'simple', or 'detailed'. The +Show cluster status. Can be 'summary', 'simple', 'detailed', or 'replication'. The default is 'summary'. Examples: hbase> status hbase> status 'simple' hbase> status 'summary' hbase> status 'detailed' + hbase> status 'replication' + hbase> status 'replication', 'source' + hbase> status 'replication', 'sink' EOF end - def command(format = 'summary') - admin.status(format) + def command(format = 'summary',type = 'both') + admin.status(format, type) end end end diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java new file mode 100644 index 00000000000..24d07eda530 --- /dev/null +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.security.access.SecureTestUtil; +import org.apache.hadoop.hbase.security.visibility.VisibilityTestUtil; +import org.jruby.embed.ScriptingContainer; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +public abstract class AbstractTestShell { + + protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + protected final static ScriptingContainer jruby = new ScriptingContainer(); + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + // Start mini cluster + TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true); + TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); + TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250); + TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6); + TEST_UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false); + TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); + TEST_UTIL.getConfiguration().setInt(HConstants.MASTER_INFO_PORT, -1); + TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_INFO_PORT, -1); + // Security setup configuration + SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration()); + VisibilityTestUtil.enableVisiblityLabels(TEST_UTIL.getConfiguration()); + + TEST_UTIL.startMiniCluster(); + + // Configure jruby runtime + List loadPaths = new ArrayList(); + loadPaths.add("src/main/ruby"); + loadPaths.add("src/test/ruby"); + jruby.getProvider().setLoadPaths(loadPaths); + jruby.put("$TEST_CLUSTER", TEST_UTIL); + System.setProperty("jruby.jit.logging.verbose", "true"); + System.setProperty("jruby.jit.logging", "true"); + System.setProperty("jruby.native.verbose", "true"); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } +} diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java new file mode 100644 index 00000000000..da2b709e000 --- /dev/null +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client; + +import java.io.IOException; + +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.jruby.embed.PathType; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({ ClientTests.class, LargeTests.class }) +public class TestReplicationShell extends AbstractTestShell { + + @Test + public void testRunShellTests() throws IOException { + System.setProperty("shell.test.include", "replication_admin_test.rb"); + // Start all ruby tests + jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); + } + +} diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java index 5fbf6a92487..976ba45078d 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java @@ -19,67 +19,21 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; -import org.apache.hadoop.hbase.security.access.SecureTestUtil; -import org.apache.hadoop.hbase.security.visibility.VisibilityTestUtil; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.jruby.embed.PathType; -import org.jruby.embed.ScriptingContainer; -import org.junit.AfterClass; -import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, LargeTests.class}) -public class TestShell { - final Log LOG = LogFactory.getLog(getClass()); - private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private final static ScriptingContainer jruby = new ScriptingContainer(); - - @BeforeClass - public static void setUpBeforeClass() throws Exception { - // Start mini cluster - TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true); - TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); - TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250); - TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6); - TEST_UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false); - TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); - // Security setup configuration - SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration()); - VisibilityTestUtil.enableVisiblityLabels(TEST_UTIL.getConfiguration()); - - TEST_UTIL.startMiniCluster(); - - // Configure jruby runtime - List loadPaths = new ArrayList(); - loadPaths.add("src/main/ruby"); - loadPaths.add("src/test/ruby"); - jruby.getProvider().setLoadPaths(loadPaths); - jruby.put("$TEST_CLUSTER", TEST_UTIL); - System.setProperty("jruby.jit.logging.verbose", "true"); - System.setProperty("jruby.jit.logging", "true"); - System.setProperty("jruby.native.verbose", "true"); - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } +@Category({ ClientTests.class, LargeTests.class }) +public class TestShell extends AbstractTestShell { 
@Test public void testRunShellTests() throws IOException { + System.setProperty("shell.test.exclude", "replication_admin_test.rb"); // Start all ruby tests jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); } } - diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb index caede3ad9f6..19258649c79 100644 --- a/hbase-shell/src/test/ruby/hbase/admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb @@ -356,5 +356,17 @@ module Hbase assert_not_equal(nil, table) table.close end + + define_test "Get replication status" do + replication_status("replication", "both") + end + + define_test "Get replication source metrics information" do + replication_status("replication", "source") + end + + define_test "Get replication sink metrics information" do + replication_status("replication", "sink") + end end end diff --git a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb new file mode 100644 index 00000000000..648efa7f861 --- /dev/null +++ b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb @@ -0,0 +1,191 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +require 'shell' +require 'shell/formatter' +require 'hbase' +require 'hbase/hbase' +require 'hbase/table' + +include HBaseConstants + +module Hbase + class ReplicationAdminTest < Test::Unit::TestCase + include TestHelpers + + def setup + @test_name = "hbase_shell_tests_table" + @peer_id = '1' + + setup_hbase + drop_test_table(@test_name) + create_test_table(@test_name) + + assert_equal(0, replication_admin.list_peers.length) + end + + def teardown + assert_equal(0, replication_admin.list_peers.length) + + shutdown + end + + define_test "add_peer: should fail when args isn't specified" do + assert_raise(ArgumentError) do + replication_admin.add_peer(@peer_id, nil) + end + end + + define_test "add_peer: fail when neither CLUSTER_KEY nor ENDPOINT_CLASSNAME are specified" do + assert_raise(ArgumentError) do + args = {} + replication_admin.add_peer(@peer_id, args) + end + end + + define_test "add_peer: fail when both CLUSTER_KEY and ENDPOINT_CLASSNAME are specified" do + assert_raise(ArgumentError) do + args = { CLUSTER_KEY => 'zk1,zk2,zk3:2182:/hbase-prod', + ENDPOINT_CLASSNAME => 'org.apache.hadoop.hbase.MyReplicationEndpoint' } + replication_admin.add_peer(@peer_id, args) + end + end + + define_test "add_peer: args must be a string or number" do + assert_raise(ArgumentError) do + replication_admin.add_peer(@peer_id, 1) + end + assert_raise(ArgumentError) do + replication_admin.add_peer(@peer_id, ['test']) + end + end + + define_test "add_peer: single zk cluster key" do + cluster_key = "server1.cie.com:2181:/hbase" + + replication_admin.add_peer(@peer_id, cluster_key) + + assert_equal(1, replication_admin.list_peers.length) + assert(replication_admin.list_peers.key?(@peer_id)) + assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id)) + + # cleanup for future tests + replication_admin.remove_peer(@peer_id) + end + + define_test "add_peer: multiple zk cluster key" do + cluster_key = "zk1,zk2,zk3:2182:/hbase-prod" + + replication_admin.add_peer(@peer_id, cluster_key) + + assert_equal(1, replication_admin.list_peers.length) + assert(replication_admin.list_peers.key?(@peer_id)) + assert_equal(replication_admin.list_peers.fetch(@peer_id), cluster_key) + + # cleanup for future tests + replication_admin.remove_peer(@peer_id) + end + + define_test "add_peer: multiple zk cluster key and table_cfs" do + cluster_key = "zk4,zk5,zk6:11000:/hbase-test" + table_cfs_str = "table1;table2:cf1;table3:cf2,cf3" + + replication_admin.add_peer(@peer_id, cluster_key, table_cfs_str) + + assert_equal(1, replication_admin.list_peers.length) + assert(replication_admin.list_peers.key?(@peer_id)) + assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id)) + assert_equal(table_cfs_str, replication_admin.show_peer_tableCFs(@peer_id)) + + # cleanup for future tests + replication_admin.remove_peer(@peer_id) + end + + define_test "add_peer: single zk cluster key - peer config" do + cluster_key = "server1.cie.com:2181:/hbase" + + args = { CLUSTER_KEY => cluster_key } + replication_admin.add_peer(@peer_id, args) + + assert_equal(1, replication_admin.list_peers.length) + assert(replication_admin.list_peers.key?(@peer_id)) + assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id)) + + # cleanup for future tests + replication_admin.remove_peer(@peer_id) + end + + define_test "add_peer: multiple zk cluster key - peer config" do + cluster_key = "zk1,zk2,zk3:2182:/hbase-prod" + + args = { CLUSTER_KEY => cluster_key } + replication_admin.add_peer(@peer_id, args) + + assert_equal(1, 
replication_admin.list_peers.length) + assert(replication_admin.list_peers.key?(@peer_id)) + assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id)) + + # cleanup for future tests + replication_admin.remove_peer(@peer_id) + end + + define_test "add_peer: multiple zk cluster key and table_cfs - peer config" do + cluster_key = "zk4,zk5,zk6:11000:/hbase-test" + table_cfs = { "table1" => [], "table2" => ["cf1"], "table3" => ["cf1", "cf2"] } + table_cfs_str = "table1;table2:cf1;table3:cf1,cf2" + + args = { CLUSTER_KEY => cluster_key, TABLE_CFS => table_cfs } + replication_admin.add_peer(@peer_id, args) + + assert_equal(1, replication_admin.list_peers.length) + assert(replication_admin.list_peers.key?(@peer_id)) + assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id)) + assert_equal(table_cfs_str, replication_admin.show_peer_tableCFs(@peer_id)) + + # cleanup for future tests + replication_admin.remove_peer(@peer_id) + end + + define_test "add_peer: should fail when args is a hash and peer_tableCFs provided" do + cluster_key = "zk4,zk5,zk6:11000:/hbase-test" + table_cfs_str = "table1;table2:cf1;table3:cf1,cf2" + + assert_raise(ArgumentError) do + args = { CLUSTER_KEY => cluster_key } + replication_admin.add_peer(@peer_id, args, table_cfs_str) + end + end + + # assert_raise fails on native exceptions - https://jira.codehaus.org/browse/JRUBY-5279 + # Can't catch native Java exception with assert_raise in JRuby 1.6.8 as in the test below. + # define_test "add_peer: adding a second peer with same id should error" do + # replication_admin.add_peer(@peer_id, '') + # assert_equal(1, replication_admin.list_peers.length) + # + # assert_raise(java.lang.IllegalArgumentException) do + # replication_admin.add_peer(@peer_id, '') + # end + # + # assert_equal(1, replication_admin.list_peers.length, 1) + # + # # cleanup for future tests + # replication_admin.remove_peer(@peer_id) + # end + end +end diff --git a/hbase-shell/src/test/ruby/hbase/table_test.rb b/hbase-shell/src/test/ruby/hbase/table_test.rb index dc4dc0dfe9a..184e0d4a601 100644 --- a/hbase-shell/src/test/ruby/hbase/table_test.rb +++ b/hbase-shell/src/test/ruby/hbase/table_test.rb @@ -611,5 +611,22 @@ module Hbase end end + define_test "Split count for a table" do + @testTableName = "tableWithSplits" + create_test_table_with_splits(@testTableName, SPLITS => ['10', '20', '30', '40']) + @table = table(@testTableName) + splits = @table._get_splits_internal() + #Total splits is 5 but here count is 4 as we ignore implicit empty split. + assert_equal(4, splits.size) + assert_equal(["10", "20", "30", "40"], splits) + drop_test_table(@testTableName) + end + + define_test "Split count for a empty table" do + splits = @test_table._get_splits_internal() + #Empty split should not be part of this array. 
+ assert_equal(0, splits.size) + assert_equal([], splits) + end end end diff --git a/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb b/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb index 0815d83c015..47ac292d148 100644 --- a/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb +++ b/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb @@ -54,6 +54,7 @@ module Hbase label = 'TEST_AUTHS' user = org.apache.hadoop.hbase.security.User.getCurrent().getName(); visibility_admin.add_labels(label) + $TEST_CLUSTER.waitLabelAvailable(10000, label) count = visibility_admin.get_auths(user).length # verifying the set functionality @@ -74,6 +75,7 @@ module Hbase label = 'TEST_VISIBILITY' user = org.apache.hadoop.hbase.security.User.getCurrent().getName(); visibility_admin.add_labels(label) + $TEST_CLUSTER.waitLabelAvailable(10000, label) visibility_admin.set_auths(user, label) # verifying put functionality diff --git a/hbase-shell/src/test/ruby/test_helper.rb b/hbase-shell/src/test/ruby/test_helper.rb index 55797610614..80eb4f56a3b 100644 --- a/hbase-shell/src/test/ruby/test_helper.rb +++ b/hbase-shell/src/test/ruby/test_helper.rb @@ -68,6 +68,10 @@ module Hbase @shell.hbase_visibility_labels_admin end + def replication_admin + @shell.hbase_replication_admin + end + def create_test_table(name) # Create the table if needed unless admin.exists?(name) @@ -81,6 +85,18 @@ module Hbase end end + def create_test_table_with_splits(name, splits) + # Create the table if needed + unless admin.exists?(name) + admin.create name, 'f1', splits + end + + # Enable the table if needed + unless admin.enabled?(name) + admin.enable(name) + end + end + def drop_test_table(name) return unless admin.exists?(name) begin @@ -94,6 +110,10 @@ module Hbase puts "IGNORING DROP TABLE ERROR: #{e}" end end + + def replication_status(format,type) + return admin.status(format,type) + end end end diff --git a/hbase-shell/src/test/ruby/tests_runner.rb b/hbase-shell/src/test/ruby/tests_runner.rb index e1458dbb085..74ddb485131 100644 --- a/hbase-shell/src/test/ruby/tests_runner.rb +++ b/hbase-shell/src/test/ruby/tests_runner.rb @@ -19,6 +19,7 @@ require 'rubygems' require 'rake' +require 'set' unless defined?($TEST_CLUSTER) include Java @@ -44,8 +45,25 @@ require 'test_helper' puts "Running tests..." 
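  # Editor's note (sketch, not part of the patch): TestShell and TestReplicationShell above
  # partition the ruby suite by setting the shell.test.include / shell.test.exclude system
  # properties, and the tests_runner.rb hunk just below reads them. The standalone helper
  # here is illustrative only (its name and the example arguments are not in the patch);
  # it mirrors the selection rule: an include set keeps only the named files, an exclude
  # set drops them.
  require 'set'

  def select_test_files(files, include_csv = nil, exclude_csv = nil)
    includes = include_csv && Set.new(include_csv.split(','))
    excludes = exclude_csv && Set.new(exclude_csv.split(','))
    files.select do |file|
      name = File.basename(file)
      next false if includes && !includes.include?(name)
      next false if excludes && excludes.include?(name)
      true
    end
  end

  # TestReplicationShell effectively runs with
  #   select_test_files(all_test_files, 'replication_admin_test.rb', nil)
  # while TestShell runs with
  #   select_test_files(all_test_files, nil, 'replication_admin_test.rb')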
+if java.lang.System.get_property('shell.test.include') + includes = Set.new(java.lang.System.get_property('shell.test.include').split(',')) +end + +if java.lang.System.get_property('shell.test.exclude') + excludes = Set.new(java.lang.System.get_property('shell.test.exclude').split(',')) +end + files = Dir[ File.dirname(__FILE__) + "/**/*_test.rb" ] files.each do |file| + filename = File.basename(file) + if includes != nil && !includes.include?(filename) + puts "Skip #{filename} because of not included" + next + end + if excludes != nil && excludes.include?(filename) + puts "Skip #{filename} because of excluded" + next + end begin load(file) rescue => e diff --git a/hbase-testing-util/src/main/asciidoc/.gitignore b/hbase-testing-util/src/main/asciidoc/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hbase-thrift/pom.xml b/hbase-thrift/pom.xml index c07a4178181..e8454820dca 100644 --- a/hbase-thrift/pom.xml +++ b/hbase-thrift/pom.xml @@ -46,27 +46,6 @@ - - maven-compiler-plugin - - - default-compile - - ${java.default.compiler} - true - false - - - - default-testCompile - - ${java.default.compiler} - true - false - - - - org.apache.maven.plugins maven-site-plugin @@ -171,6 +150,62 @@ maven-source-plugin + + + + + org.eclipse.m2e + lifecycle-mapping + 1.0.0 + + + + + + org.apache.maven.plugins + maven-antrun-plugin + [1.6,) + + run + + + + + + + + + org.apache.maven.plugins + maven-dependency-plugin + [2.8,) + + build-classpath + + + + + + + + + org.apache.maven.plugins + maven-compiler-plugin + [3.2,) + + compile + + + + + + + + + + + + diff --git a/hbase-thrift/src/main/asciidoc/.gitignore b/hbase-thrift/src/main/asciidoc/.gitignore deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.java index 25c6da3fbdb..c7ea46f61e1 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHttpServlet.java @@ -27,10 +27,10 @@ import javax.servlet.http.HttpServletResponse; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.commons.net.util.Base64; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.security.SecurityUtil; +import org.apache.hadoop.hbase.util.Base64; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.security.authorize.ProxyUsers; @@ -57,6 +57,12 @@ public class ThriftHttpServlet extends TServlet { private final boolean securityEnabled; private final boolean doAsEnabled; private transient ThriftServerRunner.HBaseHandler hbaseHandler; + private String outToken; + + // HTTP Header related constants. 
+ public static final String WWW_AUTHENTICATE = "WWW-Authenticate"; + public static final String AUTHORIZATION = "Authorization"; + public static final String NEGOTIATE = "Negotiate"; public ThriftHttpServlet(TProcessor processor, TProtocolFactory protocolFactory, UserGroupInformation realUser, Configuration conf, ThriftServerRunner.HBaseHandler @@ -72,27 +78,38 @@ public class ThriftHttpServlet extends TServlet { @Override protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { - String effectiveUser = realUser.getShortUserName(); + String effectiveUser = request.getRemoteUser(); if (securityEnabled) { try { // As Thrift HTTP transport doesn't support SPNEGO yet (THRIFT-889), // Kerberos authentication is being done at servlet level. effectiveUser = doKerberosAuth(request); + // It is standard for client applications expect this header. + // Please see http://tools.ietf.org/html/rfc4559 for more details. + response.addHeader(WWW_AUTHENTICATE, NEGOTIATE + " " + outToken); } catch (HttpAuthenticationException e) { LOG.error("Kerberos Authentication failed", e); // Send a 401 to the client response.setStatus(HttpServletResponse.SC_UNAUTHORIZED); + response.addHeader(WWW_AUTHENTICATE, NEGOTIATE); response.getWriter().println("Authentication Error: " + e.getMessage()); + return; } } String doAsUserFromQuery = request.getHeader("doAs"); + if(effectiveUser == null) { + effectiveUser = realUser.getShortUserName(); + } if (doAsUserFromQuery != null) { if (!doAsEnabled) { throw new ServletException("Support for proxyuser is not configured"); } + // The authenticated remote user is attempting to perform 'doAs' proxy user. + UserGroupInformation remoteUser = UserGroupInformation.createRemoteUser(effectiveUser); // create and attempt to authorize a proxy user (the client is attempting // to do proxy user) - UserGroupInformation ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, realUser); + UserGroupInformation ugi = UserGroupInformation.createProxyUser(doAsUserFromQuery, + remoteUser); // validate the proxy user authorization try { ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf); @@ -112,8 +129,11 @@ public class ThriftHttpServlet extends TServlet { */ private String doKerberosAuth(HttpServletRequest request) throws HttpAuthenticationException { + HttpKerberosServerAction action = new HttpKerberosServerAction(request, realUser); try { - return realUser.doAs(new HttpKerberosServerAction(request, realUser)); + String principal = realUser.doAs(action); + outToken = action.outToken; + return principal; } catch (Exception e) { LOG.error("Failed to perform authentication"); throw new HttpAuthenticationException(e); @@ -124,6 +144,7 @@ public class ThriftHttpServlet extends TServlet { private static class HttpKerberosServerAction implements PrivilegedExceptionAction { HttpServletRequest request; UserGroupInformation serviceUGI; + String outToken = null; HttpKerberosServerAction(HttpServletRequest request, UserGroupInformation serviceUGI) { this.request = request; this.serviceUGI = serviceUGI; @@ -152,16 +173,19 @@ public class ThriftHttpServlet extends TServlet { // Create a GSS context gssContext = manager.createContext(serverCreds); // Get service ticket from the authorization header - String serviceTicketBase64 = getAuthHeader(request); - byte[] inToken = Base64.decodeBase64(serviceTicketBase64.getBytes()); - gssContext.acceptSecContext(inToken, 0, inToken.length); - // Authenticate or deny based on its context completion 
- if (!gssContext.isEstablished()) { + String serviceTicketBase64 = getAuthHeader(request); + byte[] inToken = Base64.decode(serviceTicketBase64); + byte[] res = gssContext.acceptSecContext(inToken, 0, inToken.length); + if(res != null) { + outToken = Base64.encodeBytes(res).replace("\n", ""); + } + // Authenticate or deny based on its context completion + if (!gssContext.isEstablished()) { throw new HttpAuthenticationException("Kerberos authentication failed: " + "unable to establish context with the service ticket " + "provided by the client."); - } - return SecurityUtil.getUserFromPrincipal(gssContext.getSrcName().toString()); + } + return SecurityUtil.getUserFromPrincipal(gssContext.getSrcName().toString()); } catch (GSSException e) { throw new HttpAuthenticationException("Kerberos authentication failed: ", e); } finally { @@ -182,14 +206,14 @@ public class ThriftHttpServlet extends TServlet { */ private String getAuthHeader(HttpServletRequest request) throws HttpAuthenticationException { - String authHeader = request.getHeader("Authorization"); + String authHeader = request.getHeader(AUTHORIZATION); // Each http request must have an Authorization header if (authHeader == null || authHeader.isEmpty()) { throw new HttpAuthenticationException("Authorization header received " + "from the client is empty."); } String authHeaderBase64String; - int beginIndex = ("Negotiate ").length(); + int beginIndex = (NEGOTIATE + " ").length(); authHeaderBase64String = authHeader.substring(beginIndex); // Authorization header must have a payload if (authHeaderBase64String == null || authHeaderBase64String.isEmpty()) { diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java index 9f23c09aea3..4e4ade37b2a 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java @@ -55,24 +55,27 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.OperationWithAttributes; import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.ParseFilter; import org.apache.hadoop.hbase.filter.PrefixFilter; @@ -127,6 +130,7 @@ import org.mortbay.jetty.servlet.ServletHolder; import 
org.mortbay.thread.QueuedThreadPool; import com.google.common.base.Joiner; +import com.google.common.base.Throwables; import com.google.common.util.concurrent.ThreadFactoryBuilder; /** @@ -623,7 +627,7 @@ public class ThriftServerRunner implements Runnable { /** * The HBaseHandler is a glue object that connects Thrift RPC calls to the - * HBase client API primarily defined in the HBaseAdmin and HTable objects. + * HBase client API primarily defined in the Admin and Table objects. */ public static class HBaseHandler implements Hbase.Iface { protected Configuration conf; @@ -636,11 +640,11 @@ public class ThriftServerRunner implements Runnable { private final ConnectionCache connectionCache; - private static ThreadLocal> threadLocalTables = - new ThreadLocal>() { + private static ThreadLocal> threadLocalTables = + new ThreadLocal>() { @Override - protected Map initialValue() { - return new TreeMap(); + protected Map initialValue() { + return new TreeMap(); } }; @@ -650,12 +654,12 @@ public class ThriftServerRunner implements Runnable { static final String MAX_IDLETIME = "hbase.thrift.connection.max-idletime"; /** - * Returns a list of all the column families for a given htable. + * Returns a list of all the column families for a given Table. * * @param table * @throws IOException */ - byte[][] getAllColumns(HTable table) throws IOException { + byte[][] getAllColumns(Table table) throws IOException { HColumnDescriptor[] cds = table.getTableDescriptor().getColumnFamilies(); byte[][] columns = new byte[cds.length][]; for (int i = 0; i < cds.length; i++) { @@ -666,25 +670,25 @@ public class ThriftServerRunner implements Runnable { } /** - * Creates and returns an HTable instance from a given table name. + * Creates and returns a Table instance from a given table name. * * @param tableName * name of table - * @return HTable object + * @return Table object * @throws IOException * @throws IOError */ - public HTable getTable(final byte[] tableName) throws + public Table getTable(final byte[] tableName) throws IOException { String table = Bytes.toString(tableName); - Map tables = threadLocalTables.get(); + Map tables = threadLocalTables.get(); if (!tables.containsKey(table)) { - tables.put(table, (HTable)connectionCache.getTable(table)); + tables.put(table, (Table)connectionCache.getTable(table)); } return tables.get(table); } - public HTable getTable(final ByteBuffer tableName) throws IOException { + public Table getTable(final ByteBuffer tableName) throws IOException { return getTable(getBytes(tableName)); } @@ -738,7 +742,7 @@ public class ThriftServerRunner implements Runnable { /** * Obtain HBaseAdmin. Creates the instance if it is not already created. 
*/ - private HBaseAdmin getHBaseAdmin() throws IOException { + private Admin getAdmin() throws IOException { return connectionCache.getAdmin(); } @@ -749,57 +753,63 @@ public class ThriftServerRunner implements Runnable { @Override public void enableTable(ByteBuffer tableName) throws IOError { try{ - getHBaseAdmin().enableTable(getBytes(tableName)); + getAdmin().enableTable(getTableName(tableName)); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @Override public void disableTable(ByteBuffer tableName) throws IOError{ try{ - getHBaseAdmin().disableTable(getBytes(tableName)); + getAdmin().disableTable(getTableName(tableName)); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @Override public boolean isTableEnabled(ByteBuffer tableName) throws IOError { try { - return HTable.isTableEnabled(this.conf, getBytes(tableName)); + return this.connectionCache.getAdmin().isTableEnabled(getTableName(tableName)); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @Override public void compact(ByteBuffer tableNameOrRegionName) throws IOError { - try{ - getHBaseAdmin().compact(getBytes(tableNameOrRegionName)); + try { + // TODO: HBaseAdmin.compact(byte[]) deprecated and not trivial to replace here. + // ThriftServerRunner.compact should be deprecated and replaced with methods specific to + // table and region. + ((HBaseAdmin) getAdmin()).compact(getBytes(tableNameOrRegionName)); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @Override public void majorCompact(ByteBuffer tableNameOrRegionName) throws IOError { - try{ - getHBaseAdmin().majorCompact(getBytes(tableNameOrRegionName)); + try { + // TODO: HBaseAdmin.majorCompact(byte[]) deprecated and not trivial to replace here. + // ThriftServerRunner.majorCompact should be deprecated and replaced with methods specific + // to table and region. 
+ ((HBaseAdmin) getAdmin()).majorCompact(getBytes(tableNameOrRegionName)); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @Override public List getTableNames() throws IOError { try { - TableName[] tableNames = this.getHBaseAdmin().listTableNames(); + TableName[] tableNames = this.getAdmin().listTableNames(); ArrayList list = new ArrayList(tableNames.length); for (int i = 0; i < tableNames.length; i++) { list.add(ByteBuffer.wrap(tableNames[i].getName())); @@ -807,7 +817,7 @@ public class ThriftServerRunner implements Runnable { return list; } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -817,20 +827,12 @@ public class ThriftServerRunner implements Runnable { @Override public List getTableRegions(ByteBuffer tableName) throws IOError { - try { - HTable table; - try { - table = getTable(tableName); - } catch (TableNotFoundException ex) { - return new ArrayList(); - } - Map regionLocations = - table.getRegionLocations(); + try (RegionLocator locator = connectionCache.getRegionLocator(getBytes(tableName))) { + List regionLocations = locator.getAllRegionLocations(); List results = new ArrayList(); - for (Map.Entry entry : - regionLocations.entrySet()) { - HRegionInfo info = entry.getKey(); - ServerName serverName = entry.getValue(); + for (HRegionLocation regionLocation : regionLocations) { + HRegionInfo info = regionLocation.getRegionInfo(); + ServerName serverName = regionLocation.getServerName(); TRegionInfo region = new TRegionInfo(); region.serverName = ByteBuffer.wrap( Bytes.toBytes(serverName.getHostname())); @@ -848,7 +850,7 @@ public class ThriftServerRunner implements Runnable { return Collections.emptyList(); } catch (IOException e){ LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -881,7 +883,7 @@ public class ThriftServerRunner implements Runnable { byte[] qualifier, Map attributes) throws IOError { try { - HTable table = getTable(tableName); + Table table = getTable(tableName); Get get = new Get(getBytes(row)); addAttributes(get, attributes); if (qualifier == null) { @@ -893,7 +895,7 @@ public class ThriftServerRunner implements Runnable { return ThriftUtilities.cellFromHBase(result.rawCells()); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -923,7 +925,7 @@ public class ThriftServerRunner implements Runnable { public List getVer(ByteBuffer tableName, ByteBuffer row, byte[] family, byte[] qualifier, int numVersions, Map attributes) throws IOError { try { - HTable table = getTable(tableName); + Table table = getTable(tableName); Get get = new Get(getBytes(row)); addAttributes(get, attributes); if (null == qualifier) { @@ -936,7 +938,7 @@ public class ThriftServerRunner implements Runnable { return ThriftUtilities.cellFromHBase(result.rawCells()); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -966,7 +968,7 @@ public class ThriftServerRunner implements Runnable { byte[] qualifier, long timestamp, int numVersions, Map attributes) throws IOError { try { - HTable table = getTable(tableName); + Table table = getTable(tableName); Get get = new Get(getBytes(row)); 
addAttributes(get, attributes); if (null == qualifier) { @@ -980,7 +982,7 @@ public class ThriftServerRunner implements Runnable { return ThriftUtilities.cellFromHBase(result.rawCells()); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -1014,7 +1016,7 @@ public class ThriftServerRunner implements Runnable { ByteBuffer tableName, ByteBuffer row, List columns, long timestamp, Map attributes) throws IOError { try { - HTable table = getTable(tableName); + Table table = getTable(tableName); if (columns == null) { Get get = new Get(getBytes(row)); addAttributes(get, attributes); @@ -1037,7 +1039,7 @@ public class ThriftServerRunner implements Runnable { return ThriftUtilities.rowResultFromHBase(result); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -1077,7 +1079,7 @@ public class ThriftServerRunner implements Runnable { Map attributes) throws IOError { try { List gets = new ArrayList(rows.size()); - HTable table = getTable(tableName); + Table table = getTable(tableName); if (metrics != null) { metrics.incNumRowKeysInBatchGet(rows.size()); } @@ -1102,7 +1104,7 @@ public class ThriftServerRunner implements Runnable { return ThriftUtilities.rowResultFromHBase(result); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -1121,7 +1123,7 @@ public class ThriftServerRunner implements Runnable { ByteBuffer column, long timestamp, Map attributes) throws IOError { try { - HTable table = getTable(tableName); + Table table = getTable(tableName); Delete delete = new Delete(getBytes(row)); addAttributes(delete, attributes); byte [][] famAndQf = KeyValue.parseColumn(getBytes(column)); @@ -1134,7 +1136,7 @@ public class ThriftServerRunner implements Runnable { } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -1150,13 +1152,13 @@ public class ThriftServerRunner implements Runnable { ByteBuffer tableName, ByteBuffer row, long timestamp, Map attributes) throws IOError { try { - HTable table = getTable(tableName); + Table table = getTable(tableName); Delete delete = new Delete(getBytes(row), timestamp); addAttributes(delete, attributes); table.delete(delete); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -1164,40 +1166,44 @@ public class ThriftServerRunner implements Runnable { public void createTable(ByteBuffer in_tableName, List columnFamilies) throws IOError, IllegalArgument, AlreadyExists { - byte [] tableName = getBytes(in_tableName); + TableName tableName = getTableName(in_tableName); try { - if (getHBaseAdmin().tableExists(tableName)) { + if (getAdmin().tableExists(tableName)) { throw new AlreadyExists("table name already in use"); } - HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName)); + HTableDescriptor desc = new HTableDescriptor(tableName); for (ColumnDescriptor col : columnFamilies) { HColumnDescriptor colDesc = ThriftUtilities.colDescFromThrift(col); desc.addFamily(colDesc); } - getHBaseAdmin().createTable(desc); + getAdmin().createTable(desc); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw 
new IOError(Throwables.getStackTraceAsString(e)); } catch (IllegalArgumentException e) { LOG.warn(e.getMessage(), e); - throw new IllegalArgument(e.getMessage()); + throw new IllegalArgument(Throwables.getStackTraceAsString(e)); } } + private static TableName getTableName(ByteBuffer buffer) { + return TableName.valueOf(getBytes(buffer)); + } + @Override public void deleteTable(ByteBuffer in_tableName) throws IOError { - byte [] tableName = getBytes(in_tableName); + TableName tableName = getTableName(in_tableName); if (LOG.isDebugEnabled()) { - LOG.debug("deleteTable: table=" + Bytes.toString(tableName)); + LOG.debug("deleteTable: table=" + tableName); } try { - if (!getHBaseAdmin().tableExists(tableName)) { + if (!getAdmin().tableExists(tableName)) { throw new IOException("table does not exist"); } - getHBaseAdmin().deleteTable(tableName); + getAdmin().deleteTable(tableName); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -1214,7 +1220,7 @@ public class ThriftServerRunner implements Runnable { List mutations, long timestamp, Map attributes) throws IOError, IllegalArgument { - HTable table = null; + Table table = null; try { table = getTable(tableName); Put put = new Put(getBytes(row), timestamp); @@ -1255,10 +1261,10 @@ public class ThriftServerRunner implements Runnable { table.put(put); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } catch (IllegalArgumentException e) { LOG.warn(e.getMessage(), e); - throw new IllegalArgument(e.getMessage()); + throw new IllegalArgument(Throwables.getStackTraceAsString(e)); } } @@ -1316,7 +1322,7 @@ public class ThriftServerRunner implements Runnable { puts.add(put); } - HTable table = null; + Table table = null; try { table = getTable(tableName); if (!puts.isEmpty()) @@ -1326,10 +1332,10 @@ public class ThriftServerRunner implements Runnable { } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } catch (IllegalArgumentException e) { LOG.warn(e.getMessage(), e); - throw new IllegalArgument(e.getMessage()); + throw new IllegalArgument(Throwables.getStackTraceAsString(e)); } } @@ -1348,14 +1354,14 @@ public class ThriftServerRunner implements Runnable { protected long atomicIncrement(ByteBuffer tableName, ByteBuffer row, byte [] family, byte [] qualifier, long amount) throws IOError, IllegalArgument, TException { - HTable table; + Table table; try { table = getTable(tableName); return table.incrementColumnValue( getBytes(row), family, qualifier, amount); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -1391,7 +1397,7 @@ public class ThriftServerRunner implements Runnable { } } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } return ThriftUtilities.rowResultFromHBase(results, resultScannerWrapper.isColumnSorted()); } @@ -1406,7 +1412,7 @@ public class ThriftServerRunner implements Runnable { Map attributes) throws IOError { try { - HTable table = getTable(tableName); + Table table = getTable(tableName); Scan scan = new Scan(); addAttributes(scan, attributes); if (tScan.isSetStartRow()) { @@ -1445,7 +1451,7 @@ public class ThriftServerRunner 
implements Runnable { return addScanner(table.getScanner(scan), tScan.sortColumns); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -1454,7 +1460,7 @@ public class ThriftServerRunner implements Runnable { List columns, Map attributes) throws IOError { try { - HTable table = getTable(tableName); + Table table = getTable(tableName); Scan scan = new Scan(getBytes(startRow)); addAttributes(scan, attributes); if(columns != null && columns.size() != 0) { @@ -1470,7 +1476,7 @@ public class ThriftServerRunner implements Runnable { return addScanner(table.getScanner(scan), false); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -1480,7 +1486,7 @@ public class ThriftServerRunner implements Runnable { Map attributes) throws IOError, TException { try { - HTable table = getTable(tableName); + Table table = getTable(tableName); Scan scan = new Scan(getBytes(startRow), getBytes(stopRow)); addAttributes(scan, attributes); if(columns != null && columns.size() != 0) { @@ -1496,7 +1502,7 @@ public class ThriftServerRunner implements Runnable { return addScanner(table.getScanner(scan), false); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -1507,7 +1513,7 @@ public class ThriftServerRunner implements Runnable { Map attributes) throws IOError, TException { try { - HTable table = getTable(tableName); + Table table = getTable(tableName); Scan scan = new Scan(getBytes(startAndPrefix)); addAttributes(scan, attributes); Filter f = new WhileMatchFilter( @@ -1526,7 +1532,7 @@ public class ThriftServerRunner implements Runnable { return addScanner(table.getScanner(scan), false); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -1535,7 +1541,7 @@ public class ThriftServerRunner implements Runnable { List columns, long timestamp, Map attributes) throws IOError, TException { try { - HTable table = getTable(tableName); + Table table = getTable(tableName); Scan scan = new Scan(getBytes(startRow)); addAttributes(scan, attributes); scan.setTimeRange(0, timestamp); @@ -1552,7 +1558,7 @@ public class ThriftServerRunner implements Runnable { return addScanner(table.getScanner(scan), false); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -1562,7 +1568,7 @@ public class ThriftServerRunner implements Runnable { Map attributes) throws IOError, TException { try { - HTable table = getTable(tableName); + Table table = getTable(tableName); Scan scan = new Scan(getBytes(startRow), getBytes(stopRow)); addAttributes(scan, attributes); scan.setTimeRange(0, timestamp); @@ -1580,7 +1586,7 @@ public class ThriftServerRunner implements Runnable { return addScanner(table.getScanner(scan), false); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -1591,7 +1597,7 @@ public class ThriftServerRunner implements Runnable { TreeMap columns = new TreeMap(); - HTable table = getTable(tableName); + Table table = getTable(tableName); HTableDescriptor desc = table.getTableDescriptor(); for (HColumnDescriptor e : 
desc.getFamilies()) { @@ -1601,7 +1607,7 @@ public class ThriftServerRunner implements Runnable { return columns; } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -1610,22 +1616,20 @@ public class ThriftServerRunner implements Runnable { public List getRowOrBefore(ByteBuffer tableName, ByteBuffer row, ByteBuffer family) throws IOError { try { - HTable table = getTable(getBytes(tableName)); - Result result = table.getRowOrBefore(getBytes(row), getBytes(family)); + Result result = getRowOrBefore(getBytes(tableName), getBytes(row), getBytes(family)); return ThriftUtilities.cellFromHBase(result.rawCells()); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @Override public TRegionInfo getRegionInfo(ByteBuffer searchRow) throws IOError { try { - HTable table = getTable(TableName.META_TABLE_NAME.getName()); byte[] row = getBytes(searchRow); - Result startRowResult = table.getRowOrBefore( - row, HConstants.CATALOG_FAMILY); + Result startRowResult = + getRowOrBefore(TableName.META_TABLE_NAME.getName(), row, HConstants.CATALOG_FAMILY); if (startRowResult == null) { throw new IOException("Cannot find row in "+ TableName.META_TABLE_NAME+", row=" @@ -1655,7 +1659,19 @@ public class ThriftServerRunner implements Runnable { return region; } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); + } + } + + private Result getRowOrBefore(byte[] tableName, byte[] row, byte[] family) throws IOException { + Scan scan = new Scan(row); + scan.setReversed(true); + scan.addFamily(family); + scan.setStartRow(row); + + Table table = getTable(tableName); + try (ResultScanner scanner = table.getScanner(scan)) { + return scanner.next(); } } @@ -1676,12 +1692,12 @@ public class ThriftServerRunner implements Runnable { } try { - HTable table = getTable(tincrement.getTable()); + Table table = getTable(tincrement.getTable()); Increment inc = ThriftUtilities.incrementFromThrift(tincrement); table.increment(inc); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -1703,13 +1719,13 @@ public class ThriftServerRunner implements Runnable { } try { - HTable table = getTable(tappend.getTable()); + Table table = getTable(tappend.getTable()); Append append = ThriftUtilities.appendFromThrift(tappend); Result result = table.append(append); return ThriftUtilities.cellFromHBase(result.rawCells()); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } } @@ -1730,10 +1746,10 @@ public class ThriftServerRunner implements Runnable { put.setDurability(mput.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL); } catch (IllegalArgumentException e) { LOG.warn(e.getMessage(), e); - throw new IllegalArgument(e.getMessage()); + throw new IllegalArgument(Throwables.getStackTraceAsString(e)); } - HTable table = null; + Table table = null; try { table = getTable(tableName); byte[][] famAndQf = KeyValue.parseColumn(getBytes(column)); @@ -1741,10 +1757,10 @@ public class ThriftServerRunner implements Runnable { value != null ? 
getBytes(value) : HConstants.EMPTY_BYTE_ARRAY, put); } catch (IOException e) { LOG.warn(e.getMessage(), e); - throw new IOError(e.getMessage()); + throw new IOError(Throwables.getStackTraceAsString(e)); } catch (IllegalArgumentException e) { LOG.warn(e.getMessage(), e); - throw new IllegalArgument(e.getMessage()); + throw new IllegalArgument(Throwables.getStackTraceAsString(e)); } } } diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/HTablePool.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/HTablePool.java deleted file mode 100644 index e9c9e1fec8f..00000000000 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/HTablePool.java +++ /dev/null @@ -1,696 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.thrift2; - -import java.io.Closeable; -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.client.Append; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Durability; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTableFactory; -import org.apache.hadoop.hbase.client.HTableInterface; -import org.apache.hadoop.hbase.client.HTableInterfaceFactory; -import org.apache.hadoop.hbase.client.Increment; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Row; -import org.apache.hadoop.hbase.client.RowMutations; -import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.client.coprocessor.Batch; -import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback; -import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; -import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.PoolMap; -import org.apache.hadoop.hbase.util.PoolMap.PoolType; - -import com.google.protobuf.Descriptors; -import com.google.protobuf.Message; -import com.google.protobuf.Service; -import com.google.protobuf.ServiceException; - -/** - * A simple pool of HTable instances. - * - * Each HTablePool acts as a pool for all tables. To use, instantiate an - * HTablePool and use {@link #getTable(String)} to get an HTable from the pool. 
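The HTablePool class whose deletion starts here handed out pooled HTable handles per table name. After this patch the thrift2 handler relies on ConnectionCache instead (see the ThriftHBaseServiceHandler changes further down), and ordinary clients get the same effect from a single shared Connection, which manages its own internal pooling. A rough sketch of the replacement usage, assuming an HBase 1.0+ client; the table, family and qualifier names are made up for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class NoPoolSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // One heavyweight, thread-safe Connection per application replaces the pool.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         // Table handles are lightweight: create one per task and close it when done.
         Table table = connection.getTable(TableName.valueOf("example_table"))) {
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(put);
    }
  }
}

Closing the Connection (for example from a shutdown hook) releases everything the old closeTablePool()/close() calls used to clean up.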
- *
- * This method is not needed anymore, clients should call
- * HTableInterface.close() rather than returning the tables to the pool
- *
- * Once you are done with it, close your instance of
- * {@link org.apache.hadoop.hbase.client.HTableInterface}
- * by calling {@link org.apache.hadoop.hbase.client.HTableInterface#close()} rather than returning
- * the tablesto the pool with (deprecated)
- * {@link #putTable(org.apache.hadoop.hbase.client.HTableInterface)}.
- *
- *
- * A pool can be created with a maxSize which defines the most HTable
- * references that will ever be retained for each table. Otherwise the default
- * is {@link Integer#MAX_VALUE}.
- *
- *
      - * Pool will manage its own connections to the cluster. See - * {@link org.apache.hadoop.hbase.client.HConnectionManager}. - * Was @deprecated made @InterfaceAudience.private as of 0.98.1. - * See {@link org.apache.hadoop.hbase.client.HConnection#getTable(String)}, - * Moved to thrift2 module for 2.0 - */ -@InterfaceAudience.Private -public class HTablePool implements Closeable { - private final PoolMap tables; - private final int maxSize; - private final PoolType poolType; - private final Configuration config; - private final HTableInterfaceFactory tableFactory; - - /** - * Default Constructor. Default HBaseConfiguration and no limit on pool size. - */ - public HTablePool() { - this(HBaseConfiguration.create(), Integer.MAX_VALUE); - } - - /** - * Constructor to set maximum versions and use the specified configuration. - * - * @param config - * configuration - * @param maxSize - * maximum number of references to keep for each table - */ - public HTablePool(final Configuration config, final int maxSize) { - this(config, maxSize, null, null); - } - - /** - * Constructor to set maximum versions and use the specified configuration and - * table factory. - * - * @param config - * configuration - * @param maxSize - * maximum number of references to keep for each table - * @param tableFactory - * table factory - */ - public HTablePool(final Configuration config, final int maxSize, - final HTableInterfaceFactory tableFactory) { - this(config, maxSize, tableFactory, PoolType.Reusable); - } - - /** - * Constructor to set maximum versions and use the specified configuration and - * pool type. - * - * @param config - * configuration - * @param maxSize - * maximum number of references to keep for each table - * @param poolType - * pool type which is one of {@link PoolType#Reusable} or - * {@link PoolType#ThreadLocal} - */ - public HTablePool(final Configuration config, final int maxSize, - final PoolType poolType) { - this(config, maxSize, null, poolType); - } - - /** - * Constructor to set maximum versions and use the specified configuration, - * table factory and pool type. The HTablePool supports the - * {@link PoolType#Reusable} and {@link PoolType#ThreadLocal}. If the pool - * type is null or not one of those two values, then it will default to - * {@link PoolType#Reusable}. - * - * @param config - * configuration - * @param maxSize - * maximum number of references to keep for each table - * @param tableFactory - * table factory - * @param poolType - * pool type which is one of {@link PoolType#Reusable} or - * {@link PoolType#ThreadLocal} - */ - public HTablePool(final Configuration config, final int maxSize, - final HTableInterfaceFactory tableFactory, PoolType poolType) { - // Make a new configuration instance so I can safely cleanup when - // done with the pool. - this.config = config == null ? HBaseConfiguration.create() : config; - this.maxSize = maxSize; - this.tableFactory = tableFactory == null ? new HTableFactory() - : tableFactory; - if (poolType == null) { - this.poolType = PoolType.Reusable; - } else { - switch (poolType) { - case Reusable: - case ThreadLocal: - this.poolType = poolType; - break; - default: - this.poolType = PoolType.Reusable; - break; - } - } - this.tables = new PoolMap(this.poolType, - this.maxSize); - } - - /** - * Get a reference to the specified table from the pool. - *
- *
      - * - * @param tableName - * table name - * @return a reference to the specified table - * @throws RuntimeException - * if there is a problem instantiating the HTable - */ - public HTableInterface getTable(String tableName) { - // call the old getTable implementation renamed to findOrCreateTable - HTableInterface table = findOrCreateTable(tableName); - // return a proxy table so when user closes the proxy, the actual table - // will be returned to the pool - return new PooledHTable(table); - } - - /** - * Get a reference to the specified table from the pool. - *
      - * - * Create a new one if one is not available. - * - * @param tableName - * table name - * @return a reference to the specified table - * @throws RuntimeException - * if there is a problem instantiating the HTable - */ - private HTableInterface findOrCreateTable(String tableName) { - HTableInterface table = tables.get(tableName); - if (table == null) { - table = createHTable(tableName); - } - return table; - } - - /** - * Get a reference to the specified table from the pool. - *
      - * - * Create a new one if one is not available. - * - * @param tableName - * table name - * @return a reference to the specified table - * @throws RuntimeException if there is a problem instantiating the HTable - */ - public HTableInterface getTable(byte[] tableName) { - return getTable(Bytes.toString(tableName)); - } - - /** - * This method is not needed anymore, clients should call - * HTableInterface.close() rather than returning the tables to the pool - * - * @param table - * the proxy table user got from pool - * @deprecated - */ - @Deprecated - public void putTable(HTableInterface table) throws IOException { - // we need to be sure nobody puts a proxy implementation in the pool - // but if the client code is not updated - // and it will continue to call putTable() instead of calling close() - // then we need to return the wrapped table to the pool instead of the - // proxy - // table - if (table instanceof PooledHTable) { - returnTable(((PooledHTable) table).getWrappedTable()); - } else { - // normally this should not happen if clients pass back the same - // table - // object they got from the pool - // but if it happens then it's better to reject it - throw new IllegalArgumentException("not a pooled table: " + table); - } - } - - /** - * Puts the specified HTable back into the pool. - *
      - * - * If the pool already contains maxSize references to the table, then - * the table instance gets closed after flushing buffered edits. - * - * @param table - * table - */ - private void returnTable(HTableInterface table) throws IOException { - // this is the old putTable method renamed and made private - String tableName = Bytes.toString(table.getTableName()); - if (tables.size(tableName) >= maxSize) { - // release table instance since we're not reusing it - this.tables.removeValue(tableName, table); - this.tableFactory.releaseHTableInterface(table); - return; - } - tables.put(tableName, table); - } - - protected HTableInterface createHTable(String tableName) { - return this.tableFactory.createHTableInterface(config, - Bytes.toBytes(tableName)); - } - - /** - * Closes all the HTable instances , belonging to the given table, in the - * table pool. - *
      - * Note: this is a 'shutdown' of the given table pool and different from - * {@link #putTable(HTableInterface)}, that is used to return the table - * instance to the pool for future re-use. - * - * @param tableName - */ - public void closeTablePool(final String tableName) throws IOException { - Collection tables = this.tables.values(tableName); - if (tables != null) { - for (HTableInterface table : tables) { - this.tableFactory.releaseHTableInterface(table); - } - } - this.tables.remove(tableName); - } - - /** - * See {@link #closeTablePool(String)}. - * - * @param tableName - */ - public void closeTablePool(final byte[] tableName) throws IOException { - closeTablePool(Bytes.toString(tableName)); - } - - /** - * Closes all the HTable instances , belonging to all tables in the table - * pool. - *
      - * Note: this is a 'shutdown' of all the table pools. - */ - public void close() throws IOException { - for (String tableName : tables.keySet()) { - closeTablePool(tableName); - } - this.tables.clear(); - } - - public int getCurrentPoolSize(String tableName) { - return tables.size(tableName); - } - - /** - * A proxy class that implements HTableInterface.close method to return the - * wrapped table back to the table pool - * - */ - class PooledHTable implements HTableInterface { - - private boolean open = false; - - private HTableInterface table; // actual table implementation - - public PooledHTable(HTableInterface table) { - this.table = table; - this.open = true; - } - - @Override - public byte[] getTableName() { - checkState(); - return table.getTableName(); - } - - @Override - public TableName getName() { - return table.getName(); - } - - @Override - public Configuration getConfiguration() { - checkState(); - return table.getConfiguration(); - } - - @Override - public HTableDescriptor getTableDescriptor() throws IOException { - checkState(); - return table.getTableDescriptor(); - } - - @Override - public boolean exists(Get get) throws IOException { - checkState(); - return table.exists(get); - } - - @Override - public boolean[] existsAll(List gets) throws IOException { - checkState(); - return table.existsAll(gets); - } - - @Override - public Boolean[] exists(List gets) throws IOException { - checkState(); - return table.exists(gets); - } - - @Override - public void batch(List actions, Object[] results) throws IOException, - InterruptedException { - checkState(); - table.batch(actions, results); - } - - /** - * {@inheritDoc} - * @deprecated If any exception is thrown by one of the actions, there is no way to - * retrieve the partially executed results. Use {@link #batch(List, Object[])} instead. 
- */ - @Deprecated - @Override - public Object[] batch(List actions) throws IOException, - InterruptedException { - checkState(); - return table.batch(actions); - } - - @Override - public Result get(Get get) throws IOException { - checkState(); - return table.get(get); - } - - @Override - public Result[] get(List gets) throws IOException { - checkState(); - return table.get(gets); - } - - @Override - @SuppressWarnings("deprecation") - @Deprecated - public Result getRowOrBefore(byte[] row, byte[] family) throws IOException { - checkState(); - return table.getRowOrBefore(row, family); - } - - @Override - public ResultScanner getScanner(Scan scan) throws IOException { - checkState(); - return table.getScanner(scan); - } - - @Override - public ResultScanner getScanner(byte[] family) throws IOException { - checkState(); - return table.getScanner(family); - } - - @Override - public ResultScanner getScanner(byte[] family, byte[] qualifier) - throws IOException { - checkState(); - return table.getScanner(family, qualifier); - } - - @Override - public void put(Put put) throws IOException { - checkState(); - table.put(put); - } - - @Override - public void put(List puts) throws IOException { - checkState(); - table.put(puts); - } - - @Override - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, - byte[] value, Put put) throws IOException { - checkState(); - return table.checkAndPut(row, family, qualifier, value, put); - } - - @Override - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value, Put put) throws IOException { - checkState(); - return table.checkAndPut(row, family, qualifier, compareOp, value, put); - } - - @Override - public void delete(Delete delete) throws IOException { - checkState(); - table.delete(delete); - } - - @Override - public void delete(List deletes) throws IOException { - checkState(); - table.delete(deletes); - } - - @Override - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, - byte[] value, Delete delete) throws IOException { - checkState(); - return table.checkAndDelete(row, family, qualifier, value, delete); - } - - @Override - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, - CompareOp compareOp, byte[] value, Delete delete) throws IOException { - checkState(); - return table.checkAndDelete(row, family, qualifier, compareOp, value, delete); - } - - @Override - public Result increment(Increment increment) throws IOException { - checkState(); - return table.increment(increment); - } - - @Override - public long incrementColumnValue(byte[] row, byte[] family, - byte[] qualifier, long amount) throws IOException { - checkState(); - return table.incrementColumnValue(row, family, qualifier, amount); - } - - @Override - public long incrementColumnValue(byte[] row, byte[] family, - byte[] qualifier, long amount, Durability durability) throws IOException { - checkState(); - return table.incrementColumnValue(row, family, qualifier, amount, - durability); - } - - @Override - public boolean isAutoFlush() { - checkState(); - return table.isAutoFlush(); - } - - @Override - public void flushCommits() throws IOException { - checkState(); - table.flushCommits(); - } - - /** - * Returns the actual table back to the pool - * - * @throws IOException - */ - public void close() throws IOException { - checkState(); - open = false; - returnTable(table); - } - - @Override - public CoprocessorRpcChannel coprocessorService(byte[] row) { - checkState(); - return 
table.coprocessorService(row); - } - - @Override - public Map coprocessorService(Class service, - byte[] startKey, byte[] endKey, Batch.Call callable) - throws ServiceException, Throwable { - checkState(); - return table.coprocessorService(service, startKey, endKey, callable); - } - - @Override - public void coprocessorService(Class service, - byte[] startKey, byte[] endKey, Batch.Call callable, Callback callback) - throws ServiceException, Throwable { - checkState(); - table.coprocessorService(service, startKey, endKey, callable, callback); - } - - @Override - public String toString() { - return "PooledHTable{" + ", table=" + table + '}'; - } - - /** - * Expose the wrapped HTable to tests in the same package - * - * @return wrapped htable - */ - HTableInterface getWrappedTable() { - return table; - } - - @Override - public void batchCallback(List actions, - Object[] results, Callback callback) throws IOException, - InterruptedException { - checkState(); - table.batchCallback(actions, results, callback); - } - - /** - * {@inheritDoc} - * @deprecated If any exception is thrown by one of the actions, there is no way to - * retrieve the partially executed results. Use - * {@link #batchCallback(List, Object[], org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)} - * instead. - */ - @Deprecated - @Override - public Object[] batchCallback(List actions, - Callback callback) throws IOException, InterruptedException { - checkState(); - return table.batchCallback(actions, callback); - } - - @Override - public void mutateRow(RowMutations rm) throws IOException { - checkState(); - table.mutateRow(rm); - } - - @Override - public Result append(Append append) throws IOException { - checkState(); - return table.append(append); - } - - @Override - public void setAutoFlush(boolean autoFlush) { - checkState(); - table.setAutoFlush(autoFlush, autoFlush); - } - - @Override - public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) { - checkState(); - table.setAutoFlush(autoFlush, clearBufferOnFail); - } - - @Override - public void setAutoFlushTo(boolean autoFlush) { - table.setAutoFlushTo(autoFlush); - } - - @Override - public long getWriteBufferSize() { - checkState(); - return table.getWriteBufferSize(); - } - - @Override - public void setWriteBufferSize(long writeBufferSize) throws IOException { - checkState(); - table.setWriteBufferSize(writeBufferSize); - } - - boolean isOpen() { - return open; - } - - private void checkState() { - if (!isOpen()) { - throw new IllegalStateException("Table=" + table.getName() - + " already closed"); - } - } - - @Override - public long incrementColumnValue(byte[] row, byte[] family, - byte[] qualifier, long amount, boolean writeToWAL) throws IOException { - return table.incrementColumnValue(row, family, qualifier, amount, writeToWAL); - } - - @Override - public Map batchCoprocessorService( - Descriptors.MethodDescriptor method, Message request, - byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable { - checkState(); - return table.batchCoprocessorService(method, request, startKey, endKey, - responsePrototype); - } - - @Override - public void batchCoprocessorService( - Descriptors.MethodDescriptor method, Message request, - byte[] startKey, byte[] endKey, R responsePrototype, Callback callback) - throws ServiceException, Throwable { - checkState(); - table.batchCoprocessorService(method, request, startKey, endKey, responsePrototype, callback); - } - - @Override - public boolean checkAndMutate(byte[] row, byte[] family, 
byte[] qualifier, CompareOp compareOp, - byte[] value, RowMutations mutation) throws IOException { - checkState(); - return table.checkAndMutate(row, family, qualifier, compareOp, value, mutation); - } - } -} diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java index 41305a6b2d3..5031fb15b56 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java @@ -18,7 +18,18 @@ */ package org.apache.hadoop.hbase.thrift2; -import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.*; +import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.appendFromThrift; +import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.deleteFromThrift; +import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.deletesFromThrift; +import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.getFromThrift; +import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.getsFromThrift; +import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.incrementFromThrift; +import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.putFromThrift; +import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.putsFromThrift; +import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.resultFromHBase; +import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.resultsFromHBase; +import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.rowMutationsFromThrift; +import static org.apache.hadoop.hbase.thrift2.ThriftUtilities.scanFromThrift; import static org.apache.thrift.TBaseHelper.byteBufferToByteArray; import java.io.IOException; @@ -30,30 +41,32 @@ import java.nio.ByteBuffer; import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.client.HTableFactory; -import org.apache.hadoop.hbase.client.HTableInterface; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.thrift.ThriftMetrics; -import org.apache.hadoop.hbase.thrift2.generated.*; +import org.apache.hadoop.hbase.thrift2.generated.TAppend; +import org.apache.hadoop.hbase.thrift2.generated.TDelete; +import org.apache.hadoop.hbase.thrift2.generated.TGet; +import org.apache.hadoop.hbase.thrift2.generated.THBaseService; +import org.apache.hadoop.hbase.thrift2.generated.TIOError; +import org.apache.hadoop.hbase.thrift2.generated.TIllegalArgument; +import org.apache.hadoop.hbase.thrift2.generated.TIncrement; +import org.apache.hadoop.hbase.thrift2.generated.TPut; +import org.apache.hadoop.hbase.thrift2.generated.TResult; +import org.apache.hadoop.hbase.thrift2.generated.TRowMutations; +import org.apache.hadoop.hbase.thrift2.generated.TScan; import org.apache.hadoop.hbase.util.Bytes; import 
org.apache.hadoop.hbase.util.ConnectionCache; import org.apache.thrift.TException; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; - /** * This class is a glue object that connects Thrift RPC calls to the HBase client API primarily * defined in the HTableInterface. @@ -63,8 +76,6 @@ import com.google.common.cache.CacheBuilder; public class ThriftHBaseServiceHandler implements THBaseService.Iface { // TODO: Size of pool configuraple - private final Cache htablePools; - private final Callable htablePoolCreater; private static final Log LOG = LogFactory.getLog(ThriftHBaseServiceHandler.class); // nextScannerId and scannerMap are used to manage scanner state @@ -74,8 +85,6 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { new ConcurrentHashMap(); private final ConnectionCache connectionCache; - private final HTableFactory tableFactory; - private final int maxPoolSize; static final String CLEANUP_INTERVAL = "hbase.thrift.connection.cleanup-interval"; static final String MAX_IDLETIME = "hbase.thrift.connection.max-idletime"; @@ -86,7 +95,7 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { new Class[] { THBaseService.Iface.class }, new THBaseServiceMetricsProxy(handler, metrics)); } - private static class THBaseServiceMetricsProxy implements InvocationHandler { + private static final class THBaseServiceMetricsProxy implements InvocationHandler { private final THBaseService.Iface handler; private final ThriftMetrics metrics; @@ -122,34 +131,13 @@ public class ThriftHBaseServiceHandler implements THBaseService.Iface { int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000); connectionCache = new ConnectionCache( conf, userProvider, cleanInterval, maxIdleTime); - tableFactory = new HTableFactory() { - @Override - public HTableInterface createHTableInterface(Configuration config, - byte[] tableName) { - try { - return connectionCache.getTable(Bytes.toString(tableName)); - } catch (IOException ioe) { - throw new RuntimeException(ioe); - } - } - }; - htablePools = CacheBuilder.newBuilder().expireAfterAccess( - maxIdleTime, TimeUnit.MILLISECONDS).softValues().concurrencyLevel(4).build(); - maxPoolSize = conf.getInt("hbase.thrift.htablepool.size.max", 1000); - htablePoolCreater = new Callable() { - public HTablePool call() { - return new HTablePool(conf, maxPoolSize, tableFactory); - } - }; } private Table getTable(ByteBuffer tableName) { - String currentUser = connectionCache.getEffectiveUser(); try { - HTablePool htablePool = htablePools.get(currentUser, htablePoolCreater); - return htablePool.getTable(byteBufferToByteArray(tableName)); - } catch (ExecutionException ee) { - throw new RuntimeException(ee); + return connectionCache.getTable(Bytes.toString(byteBufferToByteArray(tableName))); + } catch (IOException ie) { + throw new RuntimeException(ie); } } diff --git a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift index 7c6d6a28e2e..bc790e70421 100644 --- a/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift +++ b/hbase-thrift/src/main/resources/org/apache/hadoop/hbase/thrift2/hbase.thrift @@ -230,8 +230,8 @@ struct TScan { * Atomic mutation for the specified row. It can be either Put or Delete. 
*/ union TMutation { - 1: optional TPut put, - 2: optional TDelete deleteSingle, + 1: TPut put, + 2: TDelete deleteSingle, } /** @@ -275,7 +275,7 @@ service THBaseService { 1: required binary table, /** the TGet to check for */ - 2: required TGet get + 2: required TGet tget ) throws (1:TIOError io) /** @@ -291,7 +291,7 @@ service THBaseService { 1: required binary table, /** the TGet to fetch */ - 2: required TGet get + 2: required TGet tget ) throws (1: TIOError io) /** @@ -310,7 +310,7 @@ service THBaseService { /** a list of TGets to fetch, the Result list will have the Results at corresponding positions or null if there was an error */ - 2: required list gets + 2: required list tgets ) throws (1: TIOError io) /** @@ -321,7 +321,7 @@ service THBaseService { 1: required binary table, /** the TPut to put */ - 2: required TPut put + 2: required TPut tput ) throws (1: TIOError io) /** @@ -349,7 +349,7 @@ service THBaseService { 5: binary value, /** the TPut to put if the check succeeds */ - 6: required TPut put + 6: required TPut tput ) throws (1: TIOError io) /** @@ -360,7 +360,7 @@ service THBaseService { 1: required binary table, /** a list of TPuts to commit */ - 2: required list puts + 2: required list tputs ) throws (1: TIOError io) /** @@ -374,7 +374,7 @@ service THBaseService { 1: required binary table, /** the TDelete to delete */ - 2: required TDelete deleteSingle + 2: required TDelete tdelete ) throws (1: TIOError io) /** @@ -389,7 +389,7 @@ service THBaseService { 1: required binary table, /** list of TDeletes to delete */ - 2: required list deletes + 2: required list tdeletes ) throws (1: TIOError io) /** @@ -417,7 +417,7 @@ service THBaseService { 5: binary value, /** the TDelete to execute if the check succeeds */ - 6: required TDelete deleteSingle + 6: required TDelete tdelete ) throws (1: TIOError io) TResult increment( @@ -425,7 +425,7 @@ service THBaseService { 1: required binary table, /** the TIncrement to increment */ - 2: required TIncrement increment + 2: required TIncrement tincrement ) throws (1: TIOError io) TResult append( @@ -433,7 +433,7 @@ service THBaseService { 1: required binary table, /** the TAppend to append */ - 2: required TAppend append + 2: required TAppend tappend ) throws (1: TIOError io) /** @@ -446,7 +446,7 @@ service THBaseService { 1: required binary table, /** the scan object to get a Scanner for */ - 2: required TScan scan, + 2: required TScan tscan, ) throws (1: TIOError io) /** @@ -490,7 +490,7 @@ service THBaseService { 1: required binary table, /** mutations to apply */ - 2: required TRowMutations rowMutations + 2: required TRowMutations trowMutations ) throws (1: TIOError io) /** @@ -504,7 +504,7 @@ service THBaseService { 1: required binary table, /** the scan object to get a Scanner for */ - 2: required TScan scan, + 2: required TScan tscan, /** number of rows to return */ 3: i32 numRows = 1 diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestHTablePool.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestHTablePool.java deleted file mode 100644 index 101a7cf4719..00000000000 --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestHTablePool.java +++ /dev/null @@ -1,366 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.thrift2; - -import java.io.IOException; - -import org.apache.hadoop.hbase.*; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.HTableInterface; -import org.apache.hadoop.hbase.client.Table; -import org.apache.hadoop.hbase.testclassification.ClientTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.PoolMap.PoolType; -import org.junit.*; -import org.junit.experimental.categories.Category; -import org.junit.runner.RunWith; -import org.junit.runners.Suite; - -/** - * Tests HTablePool. - */ -@RunWith(Suite.class) -@Suite.SuiteClasses({TestHTablePool.TestHTableReusablePool.class, TestHTablePool.TestHTableThreadLocalPool.class}) -@Category({ClientTests.class, MediumTests.class}) -public class TestHTablePool { - private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private final static String TABLENAME = "TestHTablePool"; - - public abstract static class TestHTablePoolType { - - @BeforeClass - public static void setUpBeforeClass() throws Exception { - TEST_UTIL.startMiniCluster(1); - TEST_UTIL.createTable(TableName.valueOf(TABLENAME), HConstants.CATALOG_FAMILY); - } - - @AfterClass - public static void tearDownAfterClass() throws Exception { - TEST_UTIL.shutdownMiniCluster(); - } - - protected abstract PoolType getPoolType(); - - @Test - public void testTableWithStringName() throws Exception { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), - Integer.MAX_VALUE, getPoolType()); - String tableName = TABLENAME; - - // Request a table from an empty pool - Table table = pool.getTable(tableName); - Assert.assertNotNull(table); - - // Close table (returns table to the pool) - table.close(); - - // Request a table of the same name - Table sameTable = pool.getTable(tableName); - Assert.assertSame( - ((HTablePool.PooledHTable) table).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable).getWrappedTable()); - } - - @Test - public void testTableWithByteArrayName() throws IOException { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), - Integer.MAX_VALUE, getPoolType()); - - // Request a table from an empty pool - Table table = pool.getTable(TABLENAME); - Assert.assertNotNull(table); - - // Close table (returns table to the pool) - table.close(); - - // Request a table of the same name - Table sameTable = pool.getTable(TABLENAME); - Assert.assertSame( - ((HTablePool.PooledHTable) table).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable).getWrappedTable()); - } - - @Test - public void testTablesWithDifferentNames() throws IOException { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), - Integer.MAX_VALUE, getPoolType()); - // We add the class to the table name as the HBase cluster is reused - // during the tests: this gives naming 
unicity. - byte[] otherTable = Bytes.toBytes( - "OtherTable_" + getClass().getSimpleName() - ); - TEST_UTIL.createTable(otherTable, HConstants.CATALOG_FAMILY); - - // Request a table from an empty pool - Table table1 = pool.getTable(TABLENAME); - Table table2 = pool.getTable(otherTable); - Assert.assertNotNull(table2); - - // Close tables (returns tables to the pool) - table1.close(); - table2.close(); - - // Request tables of the same names - Table sameTable1 = pool.getTable(TABLENAME); - Table sameTable2 = pool.getTable(otherTable); - Assert.assertSame( - ((HTablePool.PooledHTable) table1).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable1).getWrappedTable()); - Assert.assertSame( - ((HTablePool.PooledHTable) table2).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable2).getWrappedTable()); - } - @Test - public void testProxyImplementationReturned() { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), - Integer.MAX_VALUE); - String tableName = TABLENAME;// Request a table from - // an - // empty pool - Table table = pool.getTable(tableName); - - // Test if proxy implementation is returned - Assert.assertTrue(table instanceof HTablePool.PooledHTable); - } - - @Test - public void testDeprecatedUsagePattern() throws IOException { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), - Integer.MAX_VALUE); - String tableName = TABLENAME;// Request a table from - // an - // empty pool - - // get table will return proxy implementation - HTableInterface table = pool.getTable(tableName); - - // put back the proxy implementation instead of closing it - pool.putTable(table); - - // Request a table of the same name - Table sameTable = pool.getTable(tableName); - - // test no proxy over proxy created - Assert.assertSame(((HTablePool.PooledHTable) table).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable).getWrappedTable()); - } - - @Test - public void testReturnDifferentTable() throws IOException { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), - Integer.MAX_VALUE); - String tableName = TABLENAME;// Request a table from - // an - // empty pool - - // get table will return proxy implementation - final Table table = pool.getTable(tableName); - HTableInterface alienTable = new HTable(TEST_UTIL.getConfiguration(), - TableName.valueOf(TABLENAME)) { - // implementation doesn't matter as long the table is not from - // pool - }; - try { - // put the wrong table in pool - pool.putTable(alienTable); - Assert.fail("alien table accepted in pool"); - } catch (IllegalArgumentException e) { - Assert.assertTrue("alien table rejected", true); - } - } - - @Test - public void testHTablePoolCloseTwice() throws Exception { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), - Integer.MAX_VALUE, getPoolType()); - String tableName = TABLENAME; - - // Request a table from an empty pool - Table table = pool.getTable(tableName); - Assert.assertNotNull(table); - Assert.assertTrue(((HTablePool.PooledHTable) table).isOpen()); - // Close table (returns table to the pool) - table.close(); - // check if the table is closed - Assert.assertFalse(((HTablePool.PooledHTable) table).isOpen()); - try { - table.close(); - Assert.fail("Should not allow table to be closed twice"); - } catch (IllegalStateException ex) { - Assert.assertTrue("table cannot be closed twice", true); - } finally { - pool.close(); - } - } - } - - @Category({ClientTests.class, MediumTests.class}) - public static class TestHTableReusablePool extends TestHTablePoolType { - @Override - 
protected PoolType getPoolType() { - return PoolType.Reusable; - } - - @Test - public void testTableWithMaxSize() throws Exception { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), 2, - getPoolType()); - - // Request tables from an empty pool - Table table1 = pool.getTable(TABLENAME); - Table table2 = pool.getTable(TABLENAME); - Table table3 = pool.getTable(TABLENAME); - - // Close tables (returns tables to the pool) - table1.close(); - table2.close(); - // The pool should reject this one since it is already full - table3.close(); - - // Request tables of the same name - Table sameTable1 = pool.getTable(TABLENAME); - Table sameTable2 = pool.getTable(TABLENAME); - Table sameTable3 = pool.getTable(TABLENAME); - Assert.assertSame( - ((HTablePool.PooledHTable) table1).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable1).getWrappedTable()); - Assert.assertSame( - ((HTablePool.PooledHTable) table2).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable2).getWrappedTable()); - Assert.assertNotSame( - ((HTablePool.PooledHTable) table3).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable3).getWrappedTable()); - } - - @Test - public void testCloseTablePool() throws IOException { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), 4, - getPoolType()); - HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); - - if (admin.tableExists(TABLENAME)) { - admin.disableTable(TABLENAME); - admin.deleteTable(TABLENAME); - } - - HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(TABLENAME)); - tableDescriptor.addFamily(new HColumnDescriptor("randomFamily")); - admin.createTable(tableDescriptor); - - // Request tables from an empty pool - Table[] tables = new Table[4]; - for (int i = 0; i < 4; ++i) { - tables[i] = pool.getTable(TABLENAME); - } - - pool.closeTablePool(TABLENAME); - - for (int i = 0; i < 4; ++i) { - tables[i].close(); - } - - Assert.assertEquals(4, - pool.getCurrentPoolSize(TABLENAME)); - - pool.closeTablePool(TABLENAME); - - Assert.assertEquals(0, - pool.getCurrentPoolSize(TABLENAME)); - } - } - - @Category({ClientTests.class, MediumTests.class}) - public static class TestHTableThreadLocalPool extends TestHTablePoolType { - @Override - protected PoolType getPoolType() { - return PoolType.ThreadLocal; - } - - @Test - public void testTableWithMaxSize() throws Exception { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), 2, - getPoolType()); - - // Request tables from an empty pool - Table table1 = pool.getTable(TABLENAME); - Table table2 = pool.getTable(TABLENAME); - Table table3 = pool.getTable(TABLENAME); - - // Close tables (returns tables to the pool) - table1.close(); - table2.close(); - // The pool should not reject this one since the number of threads - // <= 2 - table3.close(); - - // Request tables of the same name - Table sameTable1 = pool.getTable(TABLENAME); - Table sameTable2 = pool.getTable(TABLENAME); - Table sameTable3 = pool.getTable(TABLENAME); - Assert.assertSame( - ((HTablePool.PooledHTable) table3).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable1).getWrappedTable()); - Assert.assertSame( - ((HTablePool.PooledHTable) table3).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable2).getWrappedTable()); - Assert.assertSame( - ((HTablePool.PooledHTable) table3).getWrappedTable(), - ((HTablePool.PooledHTable) sameTable3).getWrappedTable()); - } - - @Test - public void testCloseTablePool() throws IOException { - HTablePool pool = new HTablePool(TEST_UTIL.getConfiguration(), 4, - getPoolType()); - 
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); - - if (admin.tableExists(TABLENAME)) { - admin.disableTable(TABLENAME); - admin.deleteTable(TABLENAME); - } - - HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(TABLENAME)); - tableDescriptor.addFamily(new HColumnDescriptor("randomFamily")); - admin.createTable(tableDescriptor); - - // Request tables from an empty pool - Table[] tables = new Table[4]; - for (int i = 0; i < 4; ++i) { - tables[i] = pool.getTable(TABLENAME); - } - - pool.closeTablePool(TABLENAME); - - for (int i = 0; i < 4; ++i) { - tables[i].close(); - } - - Assert.assertEquals(1, - pool.getCurrentPoolSize(TABLENAME)); - - pool.closeTablePool(TABLENAME); - - Assert.assertEquals(0, - pool.getCurrentPoolSize(TABLENAME)); - } - } - -} diff --git a/pom.xml b/pom.xml index ff3d4d32a61..cee2f3c9a88 100644 --- a/pom.xml +++ b/pom.xml @@ -56,6 +56,7 @@ hbase-client hbase-hadoop-compat hbase-common + hbase-procedure hbase-it hbase-examples hbase-prefix-tree @@ -198,8 +199,8 @@ Gary Helmling garyh@apache.org -8 - Continuuity - http://www.continuuity.com + Cask + http://www.cask.co gchanan @@ -225,6 +226,14 @@ Hortonworks http://www.hortonworks.com + + jerryjch + Jing Chen (Jerry) He + jerryjch@apache.org + -8 + IBM + http://www.ibm.com + jyates Jesse Yates @@ -337,6 +346,14 @@ Facebook http://www.facebook.com + + octo47 + Andrey Stepachev + octo47@gmail.com + 0 + WANdisco + http://www.wandisco.com/ + rawson Ryan Rawson @@ -353,6 +370,14 @@ Hortonworks http://www.hortonworks.com + + ssrungarapu + Srikanth Srungarapu + ssrungarapu@apache.org + -8 + Cloudera + http://www.cloudera.com + stack Michael Stack @@ -433,6 +458,14 @@ Yahoo! http://www.yahoo.com + + zhangduo + Duo Zhang + zhangduo@apache.org + +8 + Wandoujia + http://www.wandoujia.com + zjushch Chunhui Shen @@ -474,23 +507,6 @@ false -Xlint:-options - - - com.google.errorprone - error_prone_core - 1.1.1 - - - org.codehaus.plexus - plexus-compiler-javac - 2.3 - - - org.codehaus.plexus - plexus-compiler-javac-errorprone - 2.3 - - @@ -633,16 +649,63 @@ maven-eclipse-plugin 2.9 - - + org.eclipse.m2e lifecycle-mapping 1.0.0 + + + + + + org.jacoco + jacoco-maven-plugin + [0.6.2.201302030002,) + + prepare-agent + + + + + + + + + org.apache.maven.plugins + maven-enforcer-plugin + [1.0.1,) + + enforce + + + + + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + [1.4,) + + process + + + + + + + + + @@ -673,6 +736,7 @@ **/META-INF/services/** **/src/main/asciidoc/hbase.css + **/src/main/asciidoc/asciidoctor.css **/bootstrap-theme.css **/bootstrap-theme.min.css @@ -804,7 +868,7 @@ ${basedir}/target/asciidoc - + @@ -947,25 +1011,75 @@ org.asciidoctor asciidoctor-maven-plugin - 1.5.2 + 1.5.2 + false + + + org.asciidoctor + asciidoctorj-pdf + 1.5.0-alpha.6 + + + + target/site + book + images + coderay + + ${project.version} + + - output-html - - process-asciidoc - + output-html site + + process-asciidoc + - ./images - book hbase.css html5 - coderay - target/site + + output-pdf + site + + process-asciidoc + + + pdf + + + + + - + + + + + + + maven-antrun-plugin + ${maven.antrun.version} + false + + + + rename-pdf + post-site + + + + + + + + run + + @@ -994,6 +1108,13 @@ + + org.apache.felix + maven-bundle-plugin + 2.5.3 + true + true + @@ -1006,7 +1127,6 @@ 3.0.3 ${compileSource} - javac-with-errorprone 2.5.1 3.0.0-SNAPSHOT @@ -1049,6 +1169,8 @@ 4.0.23.Final 2.1.2 1.0.8 + 2.11.6 + 1.46 2.4 1.6 @@ -1067,6 +1189,7 @@ Modules are pretty heavy-weight things, so doing this work isn't too bad. 
--> hbase-server-${project.version}-tests.jar hbase-common-${project.version}-tests.jar + hbase-procedure-${project.version}-tests.jar hbase-it-${project.version}-tests.jar hbase-annotations-${project.version}-tests.jar 2.18 @@ -1082,9 +1205,9 @@ true 900 - - 1900m - 1900m + + 2800m + 2800m -enableassertions -XX:MaxDirectMemorySize=1G -Xmx${surefire.Xmx} -XX:MaxPermSize=256m -Djava.security.egd=file:/dev/./urandom -Djava.net.preferIPv4Stack=true -Djava.awt.headless=true @@ -1136,6 +1259,17 @@ hbase-protocol ${project.version} + + org.apache.hbase + hbase-procedure + ${project.version} + + + org.apache.hbase + hbase-procedure + ${project.version} + test-jar + org.apache.hbase hbase-hadoop-compat @@ -1540,6 +1674,12 @@ disruptor ${disruptor.version} + + net.spy + spymemcached + ${spy.version} + true + org.jmock jmock-junit4 @@ -1552,6 +1692,12 @@ + + org.bouncycastle + bcprov-jdk16 + ${bouncycastle.version} + test + @@ -1606,6 +1752,39 @@ --> + + + jenkins.patch + + false + + HBasePatchProcess + + + + + + org.apache.maven.plugins + maven-antrun-plugin + false + + + validate + + run + + + + Maven Exceution Environment + MAVEN_OPTS="${env.MAVEN_OPTS}" + + + + + + + + os.linux @@ -1838,6 +2017,12 @@ + + org.apache.hadoop + hadoop-minikdc + ${hadoop-two.version} + test + @@ -2003,6 +2188,12 @@ + + org.apache.hadoop + hadoop-minikdc + ${hadoop-three.version} + test + @@ -2430,13 +2621,31 @@ - javac + errorProne false - - javac - + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.1 + + javac-with-errorprone + true + + + + org.codehaus.plexus + plexus-compiler-javac-errorprone + 2.5 + + + + + @@ -2536,6 +2745,7 @@ maven-checkstyle-plugin 2.13 + target/** hbase/checkstyle.xml hbase/checkstyle-suppressions.xml diff --git a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc index 7cf70b23143..bf35c1a73a4 100644 --- a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc +++ b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc @@ -30,7 +30,7 @@ :toc: left :source-language: java -The following matrix shows the minimum permission set required to perform operations in HBase. +The following matrix shows the permission set required to perform operations in HBase. Before using the table, read through the information about how to interpret it. .Interpreting the ACL Matrix Table @@ -70,64 +70,92 @@ The [systemitem]+hbase:meta+ table is readable by every user, regardless of the This is a requirement for HBase to function correctly. `CheckAndPut` and `CheckAndDelete` operations will fail if the user does not have both Write and Read permission.:: `Increment` and `Append` operations do not require Read access.:: +The `superuser`, as the name suggests has permissions to perform all possible operations.:: +And for the operations marked with *, the checks are done in post hook and only subset of results satisfying access checks are returned back to the user.:: The following table is sorted by the interface that provides each operation. In case the table goes out of date, the unit tests which check for accuracy of permissions can be found in _hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java_, and the access controls themselves can be examined in _hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java_. 
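As a concrete example of the note above that `CheckAndPut` and `CheckAndDelete` require both Read and Write permission, an administrator can grant table-level read/write to a user before that user issues such calls. A hedged sketch using the Java AccessControlClient; the user and table names are invented, and it assumes the AccessController coprocessor is enabled and a client version that ships the Connection-based grant() overload:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // A null family and qualifier make the grant apply to the whole table.
      AccessControlClient.grant(connection, TableName.valueOf("example_table"),
          "example_user", null, null, Permission.Action.READ, Permission.Action.WRITE);
    }
  }
}

Revoking works the same way through AccessControlClient.revoke(), and getUserPermissions() can be used to check the resulting grants against the matrix below.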
.ACL Matrix -[cols="1,1,1,1", frame="all", options="header"] +[cols="1,1,1", frame="all", options="header"] |=== -| Interface | Operation | Minimum Scope | Minimum Permission -| Master | createTable | Global | C -| | modifyTable | Table | A\|C -| | deleteTable | Table | A\|C -| | truncateTable | Table | A\|C -| | addColumn | Table | A\|C -| | modifyColumn | Table | A\|C -| | deleteColumn | Table | A\|C -| | disableTable | Table | A\|C -| | disableAclTable | None | Not allowed -| | enableTable | Table | A\|C -| | move | Global | A -| | assign | Global | A -| | unassign | Global | A -| | regionOffline | Global | A -| | balance | Global | A -| | balanceSwitch | Global | A -| | shutdown | Global | A -| | stopMaster | Global | A -| | snapshot | Global | A -| | clone | Global | A -| | restore | Global | A -| | deleteSnapshot | Global | A -| | createNamespace | Global | A -| | deleteNamespace | Namespace | A -| | modifyNamespace | Namespace | A -| | flushTable | Table | A\|C -| | getTableDescriptors | Global\|Table | A -| | mergeRegions | Global | A -| Region | openRegion | Global | A -| | closeRegion | Global | A -| | stopRegionServer | Global | A -| | rollHLog | Global | A -| | mergeRegions | Global | A -| | flush | Global\|Table | A\|C -| | split | Global\|Table | A -| | compact | Global\|Table | A\|C -| | bulkLoadHFile | Table | W -| | prepareBulkLoad | Table |C -| | cleanupBulkLoad | Table |W -| | checkAndDelete | Table\|CF\|CQ | RW -| | checkAndPut | Table\|CF\|CQ | RW -| | incrementColumnValue | Table\|CF\|CQ | RW -| | scannerClose | Table | R -| | scannerNext | Table | R -| | scannerOpen | Table\|CF\|CQ | R -| Endpoint | invoke | Endpoint | X -| AccessController | grant | Global\|Table\|NS | A -| | revoke | Global\|Table\|NS | A -| | getUserPermissions | Global\|Table\|NS | A -| | checkPermissions | Global\|Table\|NS | A +| Interface | Operation | Permissions +| Master | createTable | superuser\|global\(C)\|NS\(C) +| | modifyTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C) +| | deleteTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C) +| | truncateTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C) +| | addColumn | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C) +| | modifyColumn | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C)\|column(A)\|column\(C) +| | deleteColumn | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C)\|column(A)\|column\(C) +| | enableTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C) +| | disableTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C) +| | disableAclTable | Not allowed +| | move | superuser\|global(A)\|NS(A)\|Table(A) +| | assign | superuser\|global(A)\|NS(A)\|Table(A) +| | unassign | superuser\|global(A)\|NS(A)\|Table(A) +| | regionOffline | superuser\|global(A)\|NS(A)\|Table(A) +| | balance | superuser\|global(A) +| | balanceSwitch | superuser\|global(A) +| | shutdown | superuser\|global(A) +| | stopMaster | superuser\|global(A) +| | snapshot | superuser\|global(A)\|NS(A)\|Table(A) +| | listSnapshot | superuser\|global(A)\|SnapshotOwner +| | cloneSnapshot | superuser\|global(A) +| | restoreSnapshot | superuser\|global(A)\|SnapshotOwner & (NS(A)\|Table(A)) +| | deleteSnapshot | superuser\|global(A)\|SnapshotOwner +| | createNamespace | superuser\|global(A) +| | deleteNamespace | superuser\|global(A) +| | modifyNamespace | superuser\|global(A) +| | getNamespaceDescriptor 
| superuser\|global(A)\|NS(A) +| | listNamespaceDescriptors* | superuser\|global(A)\|NS(A) +| | flushTable | superuser\|global(A)\|global\(C)\|NS(A)\|NS(\C)\|table(A)\|table\(C) +| | getTableDescriptors* | superuser\|global(A)\|global\(C)\|NS(A)\|NS\(C)\|table(A)\|table\(C) +| | getTableNames* | Any global or table perm +| | setUserQuota(global level) | superuser\|global(A) +| | setUserQuota(namespace level) | superuser\|global(A) +| | setUserQuota(Table level) | superuser\|global(A)\|NS(A)\|Table(A) +| | setTableQuota | superuser\|global(A)\|NS(A)\|Table(A) +| | setNamespaceQuota | superuser\|global(A) +| Region | openRegion | superuser\|global(A) +| | closeRegion | superuser\|global(A) +| | flush | superuser\|global(A)\|global\(C)\|table(A)\|table\(C) +| | split | superuser\|global(A)\|Table(A) +| | compact | superuser\|global(A)\|global\(C)\|table(A)\|table\(C) +| | getClosestRowBefore | superuser\|global\(R)\|NS\(R)\|Table\(R)\|CF\(R)\|CQ\(R) +| | getOp | superuser\|global\(R)\|NS\(R)\|Table\(R)\|CF\(R)\|CQ\(R) +| | exists | superuser\|global\(R)\|NS\(R)\|Table\(R)\|CF\(R)\|CQ\(R) +| | put | superuser\|global(W)\|NS(W)\|Table(W)\|CF(W)\|CQ(W) +| | delete | superuser\|global(W)\|NS(W)\|Table(W)\|CF(W)\|CQ(W) +| | batchMutate | superuser\|global(W)\|NS(W)\|Table(W)\|CF(W)\|CQ(W) +| | checkAndPut | superuser\|global(RW)\|NS(RW)\|Table(RW)\|CF(RW)\|CQ(RW) +| | checkAndPutAfterRowLock | superuser\|global\(R)\|NS\(R)\|Table\(R)\|CF\(R)\|CQ\(R) +| | checkAndDelete | superuser\|global(RW)\|NS(RW)\|Table(RW)\|CF(RW)\|CQ(RW) +| | checkAndDeleteAfterRowLock | superuser\|global\(R)\|NS\(R)\|Table\(R)\|CF\(R)\|CQ\(R) +| | incrementColumnValue | superuser\|global(W)\|NS(W)\|Table(W)\|CF(W)\|CQ(W) +| | append | superuser\|global(W)\|NS(W)\|Table(W)\|CF(W)\|CQ(W) +| | appendAfterRowLock | superuser\|global(W)\|NS(W)\|Table(W)\|CF(W)\|CQ(W) +| | increment | superuser\|global(W)\|NS(W)\|Table(W)\|CF(W)\|CQ(W) +| | incrementAfterRowLock | superuser\|global(W)\|NS(W)\|Table(W)\|CF(W)\|CQ(W) +| | scannerOpen | superuser\|global\(R)\|NS\(R)\|Table\(R)\|CF\(R)\|CQ\(R) +| | scannerNext | superuser\|global\(R)\|NS\(R)\|Table\(R)\|CF\(R)\|CQ\(R) +| | scannerClose | superuser\|global\(R)\|NS\(R)\|Table\(R)\|CF\(R)\|CQ\(R) +| | bulkLoadHFile | superuser\|global\(C)\|table\(C)\|CF\(C) +| | prepareBulkLoad | superuser\|global\(C)\|table\(C)\|CF\(C) +| | cleanupBulkLoad | superuser\|global\(C)\|table\(C)\|CF\(C) +| Endpoint | invoke | superuser\|global(X)\|NS(X)\|Table(X) +| AccessController | grant(global level) | global(A) +| | grant(namespace level) | global(A)\|NS(A) +| | grant(table level) | global(A)\|NS(A)\|table(A)\|CF(A)\|CQ(A) +| | revoke(global level) | global(A) +| | revoke(namespace level) | global(A)\|NS(A) +| | revoke(table level) | global(A)\|NS(A)\|table(A)\|CF(A)\|CQ(A) +| | getUserPermissions(global level) | global(A) +| | getUserPermissions(namespace level) | global(A)\|NS(A) +| | getUserPermissions(table level) | global(A)\|NS(A)\|table(A)\|CF(A)\|CQ(A) +| RegionServer | stopRegionServer | superuser\|global(A) +| | mergeRegions | superuser\|global(A) +| | rollWALWriterRequest | superuser\|global(A) +| | replicateLogEntries | superuser\|global(W) |=== :numbered: diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc index bae4a23ee4d..0236d81eeea 100644 --- a/src/main/asciidoc/_chapters/architecture.adoc +++ b/src/main/asciidoc/_chapters/architecture.adoc @@ -682,7 +682,7 @@ The last value (99%) is the default acceptable loading factor in the 
LRU cache a The reason it is included in this equation is that it would be unrealistic to say that it is possible to use 100% of the available memory since this would make the process blocking from the point where it loads new blocks. Here are some examples: -* One region server with the default heap size (1 GB) and the default block cache size will have 253 MB of block cache available. +* One region server with the heap size set to 1 GB and the default block cache size will have 253 MB of block cache available. * 20 region servers with the heap size set to 8 GB and a default block cache size will have 39.6 of block cache. * 100 region servers with the heap size set to 24 GB and a block cache size of 0.5 will have about 1.16 TB of block cache. @@ -858,6 +858,31 @@ For a RegionServer hosting data that can comfortably fit into cache, or if your The compressed BlockCache is disabled by default. To enable it, set `hbase.block.data.cachecompressed` to `true` in _hbase-site.xml_ on all RegionServers. +[[regionserver_splitting_implementation]] +=== RegionServer Splitting Implementation + +As write requests are handled by the region server, they accumulate in an in-memory storage system called the _memstore_. Once the memstore fills, its content are written to disk as additional store files. This event is called a _memstore flush_. As store files accumulate, the RegionServer will <> them into fewer, larger files. After each flush or compaction finishes, the amount of data stored in the region has changed. The RegionServer consults the region split policy to determine if the region has grown too large or should be split for another policy-specific reason. A region split request is enqueued if the policy recommends it. + +Logically, the process of splitting a region is simple. We find a suitable point in the keyspace of the region where we should divide the region in half, then split the region's data into two new regions at that point. The details of the process however are not simple. When a split happens, the newly created _daughter regions_ do not rewrite all the data into new files immediately. Instead, they create small files similar to symbolic link files, named link:http://www.google.com/url?q=http%3A%2F%2Fhbase.apache.org%2Fapidocs%2Forg%2Fapache%2Fhadoop%2Fhbase%2Fio%2FReference.html&sa=D&sntz=1&usg=AFQjCNEkCbADZ3CgKHTtGYI8bJVwp663CA[Reference files], which point to either the top or bottom part of the parent store file according to the split point. The reference file is used just like a regular data file, but only half of the records are considered. The region can only be split if there are no more references to the immutable data files of the parent region. Those reference files are cleaned gradually by compactions, so that the region will stop referring to its parents files, and can be split further. + +Although splitting the region is a local decision made by the RegionServer, the split process itself must coordinate with many actors. The RegionServer notifies the Master before and after the split, updates the `.META.` table so that clients can discover the new daughter regions, and rearranges the directory structure and data files in HDFS. Splitting is a multi-task process. To enable rollback in case of an error, the RegionServer keeps an in-memory journal about the execution state. The steps taken by the RegionServer to execute the split are illustrated in <>. Each step is labeled with its step number. 
Actions from RegionServers or Master are shown in red, while actions from the clients are show in green. + +[[regionserver_split_process_image]] +.RegionServer Split Process +image::region_split_process.png[Region Split Process] + +. The RegionServer decides locally to split the region, and prepares the split. *THE SPLIT TRANSACTION IS STARTED.* As a first step, the RegionServer acquires a shared read lock on the table to prevent schema modifications during the splitting process. Then it creates a znode in zookeeper under `/hbase/region-in-transition/region-name`, and sets the znode's state to `SPLITTING`. +. The Master learns about this znode, since it has a watcher for the parent `region-in-transition` znode. +. The RegionServer creates a sub-directory named `.splits` under the parent’s `region` directory in HDFS. +. The RegionServer closes the parent region and marks the region as offline in its local data structures. *THE SPLITTING REGION IS NOW OFFLINE.* At this point, client requests coming to the parent region will throw `NotServingRegionException`. The client will retry with some backoff. The closing region is flushed. +. The RegionServer creates region directories under the `.splits` directory, for daughter regions A and B, and creates necessary data structures. Then it splits the store files, in the sense that it creates two link:http://www.google.com/url?q=http%3A%2F%2Fhbase.apache.org%2Fapidocs%2Forg%2Fapache%2Fhadoop%2Fhbase%2Fio%2FReference.html&sa=D&sntz=1&usg=AFQjCNEkCbADZ3CgKHTtGYI8bJVwp663CA[Reference] files per store file in the parent region. Those reference files will point to the parent regions'files. +. The RegionServer creates the actual region directory in HDFS, and moves the reference files for each daughter. +. The RegionServer sends a `Put` request to the `.META.` table, to set the parent as offline in the `.META.` table and add information about daughter regions. At this point, there won’t be individual entries in `.META.` for the daughters. Clients will see that the parent region is split if they scan `.META.`, but won’t know about the daughters until they appear in `.META.`. Also, if this `Put` to `.META`. succeeds, the parent will be effectively split. If the RegionServer fails before this RPC succeeds, Master and the next Region Server opening the region will clean dirty state about the region split. After the `.META.` update, though, the region split will be rolled-forward by Master. +. The RegionServer opens daughters A and B in parallel. +. The RegionServer adds the daughters A and B to `.META.`, together with information that it hosts the regions. *THE SPLIT REGIONS (DAUGHTERS WITH REFERENCES TO PARENT) ARE NOW ONLINE.* After this point, clients can discover the new regions and issue requests to them. Clients cache the `.META.` entries locally, but when they make requests to the RegionServer or `.META.`, their caches will be invalidated, and they will learn about the new regions from `.META.`. +. The RegionServer updates znode `/hbase/region-in-transition/region-name` in ZooKeeper to state `SPLIT`, so that the master can learn about it. The balancer can freely re-assign the daughter regions to other region servers if necessary. *THE SPLIT TRANSACTION IS NOW FINISHED.* +. After the split, `.META.` and HDFS will still contain references to the parent region. Those references will be removed when compactions in daughter regions rewrite the data files. 
Garbage collection tasks in the master periodically check whether the daughter regions still refer to the parent region's files. If not, the parent region will be removed. + [[wal]] === Write Ahead Log (WAL) @@ -885,6 +910,29 @@ The WAL resides in HDFS in the _/hbase/WALs/_ directory (prior to HBase 0.94, th For more general information about the concept of write ahead logs, see the Wikipedia link:http://en.wikipedia.org/wiki/Write-ahead_logging[Write-Ahead Log] article. +==== MultiWAL +With a single WAL per RegionServer, the RegionServer must write to the WAL serially, because HDFS files must be sequential. This causes the WAL to be a performance bottleneck. + +HBase 1.0 introduces support MultiWal in link:https://issues.apache.org/jira/browse/HBASE-5699[HBASE-5699]. MultiWAL allows a RegionServer to write multiple WAL streams in parallel, by using multiple pipelines in the underlying HDFS instance, which increases total throughput during writes. This parallelization is done by partitioning incoming edits by their Region. Thus, the current implementation will not help with increasing the throughput to a single Region. + +RegionServers using the original WAL implementation and those using the MultiWAL implementation can each handle recovery of either set of WALs, so a zero-downtime configuration update is possible through a rolling restart. + +.Configure MultiWAL +To configure MultiWAL for a RegionServer, set the value of the property `hbase.wal.provider` to `multiwal` by pasting in the following XML: + +[source,xml] +---- + + hbase.wal.provider + multiwal + +---- + +Restart the RegionServer for the changes to take effect. + +To disable MultiWAL for a RegionServer, unset the property and restart the RegionServer. + + [[wal_flush]] ==== WAL Flushing diff --git a/src/main/asciidoc/_chapters/configuration.adoc b/src/main/asciidoc/_chapters/configuration.adoc index 6f8858d50cb..ed00a498399 100644 --- a/src/main/asciidoc/_chapters/configuration.adoc +++ b/src/main/asciidoc/_chapters/configuration.adoc @@ -172,6 +172,9 @@ session required pam_limits.so ---- ==== +Linux Shell:: + All of the shell scripts that come with HBase rely on the link:http://www.gnu.org/software/bash[GNU Bash] shell. + Windows:: Prior to HBase 0.96, testing for running HBase on Microsoft Windows was limited. Running a on Windows nodes is not recommended for production systems. @@ -708,8 +711,8 @@ The following lines in the _hbase-env.sh_ file show how to set the `JAVA_HOME` e # The java implementation to use. export JAVA_HOME=/usr/java/jdk1.7.0/ -# The maximum amount of heap to use, in MB. Default is 1000. -export HBASE_HEAPSIZE=4096 +# The maximum amount of heap to use. Default is left to JVM default. +export HBASE_HEAPSIZE=4G ---- Use +rsync+ to copy the content of the _conf_ directory to all nodes of the cluster. diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc index ea23cda3966..26ba3257eed 100644 --- a/src/main/asciidoc/_chapters/developer.adoc +++ b/src/main/asciidoc/_chapters/developer.adoc @@ -579,7 +579,7 @@ Extract the tarball and make sure it looks good. A good test for the src tarball being 'complete' is to see if you can build new tarballs from this source bundle. If the source tarball is good, save it off to a _version directory_, a directory somewhere where you are collecting all of the tarballs you will publish as part of the release candidate. 
For example if you were building a hbase-0.96.0 release candidate, you might call the directory _hbase-0.96.0RC0_. -Later you will publish this directory as our release candidate up on link:people.apache.org/~YOU[people.apache.org/~YOU/]. +Later you will publish this directory as our release candidate up on http://people.apache.org/~YOU. . Build the binary tarball. + diff --git a/src/main/asciidoc/_chapters/hbase-default.adoc b/src/main/asciidoc/_chapters/hbase-default.adoc index 23b96d5c15b..bf56dd30cde 100644 --- a/src/main/asciidoc/_chapters/hbase-default.adoc +++ b/src/main/asciidoc/_chapters/hbase-default.adoc @@ -1284,11 +1284,11 @@ Whether or not the bucketcache is used in league with the LRU *`hbase.bucketcache.size`*:: + .Description -The size of the buckets for the bucketcache if you only use a single size. - Defaults to the default blocksize, which is 64 * 1024. +Used along with bucket cache, this is a float that EITHER represents a percentage of total heap + memory size to give to the cache (if < 1.0) OR, it is the capacity in megabytes of the cache. + .Default -`65536` +`0` when specified as a float [[hbase.bucketcache.sizes]] diff --git a/src/main/asciidoc/_chapters/hbase_apis.adoc b/src/main/asciidoc/_chapters/hbase_apis.adoc index d73de619a90..85dbad14181 100644 --- a/src/main/asciidoc/_chapters/hbase_apis.adoc +++ b/src/main/asciidoc/_chapters/hbase_apis.adoc @@ -111,6 +111,13 @@ public static void upgradeFrom0 (Configuration config) { newColumn.setMaxVersions(HConstants.ALL_VERSIONS); admin.addColumn(tableName, newColumn); + // Update existing column family + HColumnDescriptor existingColumn = new HColumnDescriptor(CF_DEFAULT); + existingColumn.setCompactionCompressionType(Algorithm.GZ); + existingColumn.setMaxVersions(HConstants.ALL_VERSIONS); + table_assetmeta.modifyFamily(existingColumn) + admin.modifyTable(tableName, table_assetmeta); + // Disable an existing table admin.disableTable(tableName); diff --git a/src/main/asciidoc/_chapters/images b/src/main/asciidoc/_chapters/images new file mode 120000 index 00000000000..1e0c6c1bac5 --- /dev/null +++ b/src/main/asciidoc/_chapters/images @@ -0,0 +1 @@ +../../site/resources/images \ No newline at end of file diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc index 1402f5283cd..b8018b6661d 100644 --- a/src/main/asciidoc/_chapters/ops_mgt.adoc +++ b/src/main/asciidoc/_chapters/ops_mgt.adoc @@ -412,6 +412,8 @@ Invoke via: $ bin/hbase org.apache.hadoop.hbase.mapreduce.Export [ [ []]] ---- +By default, the `Export` tool only exports the newest version of a given cell, regardless of the number of versions stored. To export more than one version, replace *__* with the desired number of versions. + Note: caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration. === Import @@ -580,6 +582,8 @@ It will run the mapreduce all in a single process but it will run faster if you $ bin/hbase org.apache.hadoop.hbase.mapreduce.RowCounter [ ...] ---- +RowCounter only counts one version per cell. + Note: caching for the input Scan is configured via `hbase.client.scanner.caching` in the job configuration. HBase ships another diagnostic mapreduce job called link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/CellCounter.html[CellCounter]. @@ -1240,6 +1244,8 @@ Some use cases for cluster replication include: NOTE: Replication is enabled at the granularity of the column family. 
Before enabling replication for a column family, create the table and all column families to be replicated, on the destination cluster. +=== Replication Overview + Cluster replication uses a source-push methodology. An HBase cluster can be a source (also called master or active, meaning that it is the originator of new data), a destination (also called slave or passive, meaning that it receives data via replication), or can fulfill both roles at once. Replication is asynchronous, and the goal of replication is eventual consistency. @@ -1281,58 +1287,48 @@ image::hbase_replication_diagram.jpg[] HBase replication borrows many concepts from the [firstterm]_statement-based replication_ design used by MySQL. Instead of SQL statements, entire WALEdits (consisting of multiple cell inserts coming from Put and Delete operations on the clients) are replicated in order to maintain atomicity. -=== Configuring Cluster Replication - -The following is a simplified procedure for configuring cluster replication. -It may not cover every edge case. -For more information, see the link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/replication/package-summary.html#requirements[ API documentation for replication]. +=== Managing and Configuring Cluster Replication +.Cluster Configuration Overview . Configure and start the source and destination clusters. Create tables with the same names and column families on both the source and destination clusters, so that the destination cluster knows where to store data it will receive. - All hosts in the source and destination clusters should be reachable to each other. -. On the source cluster, enable replication by setting `hbase.replication` to `true` in _hbase-site.xml_. +. All hosts in the source and destination clusters should be reachable to each other. +. If both clusters use the same ZooKeeper cluster, you must use a different `zookeeper.znode.parent`, because they cannot write in the same folder. +. Check to be sure that replication has not been disabled. `hbase.replication` defaults to `true`. . On the source cluster, in HBase Shell, add the destination cluster as a peer, using the `add_peer` command. - The syntax is as follows: -+ +. On the source cluster, in HBase Shell, enable the table replication, using the `enable_table_replication` command. +. Check the logs to see if replication is taking place. If so, you will see messages like the following, coming from the ReplicationSource. ---- -hbase> add_peer 'ID' 'CLUSTER_KEY' ----- -+ -The ID is a string (prior to link:https://issues.apache.org/jira/browse/HBASE-11367[HBASE-11367], it was a short integer), which _must not contain a hyphen_ (see link:https://issues.apache.org/jira/browse/HBASE-11394[HBASE-11394]). To compose the CLUSTER_KEY, use the following template: -+ ----- -hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent ----- -+ -If both clusters use the same ZooKeeper cluster, you must use a different `zookeeper.znode.parent`, because they cannot write in the same folder. - -. On the source cluster, configure each column family to be replicated by setting its REPLICATION_SCOPE to 1, using commands such as the following in HBase Shell. -+ ----- -hbase> disable 'example_table' -hbase> alter 'example_table', {NAME => 'example_family', REPLICATION_SCOPE => '1'} -hbase> enable 'example_table' +LOG.info("Replicating "+clusterId + " -> " + peerClusterId); ---- -. 
You can verify that replication is taking place by examining the logs on the source cluster for messages such as the following. -+ ----- -Considering 1 rs, with ratio 0.1 -Getting 1 rs from peer cluster # 0 -Choosing peer 10.10.1.49:62020 ----- +.Cluster Management Commands +add_peer :: + Adds a replication relationship between two clusters. + + * ID -- a unique string, which must not contain a hyphen. + * CLUSTER_KEY: composed using the following template, with appropriate place-holders: `hbase.zookeeper.quorum:hbase.zookeeper.property.clientPort:zookeeper.znode.parent` +list_peers:: list all replication relationships known by this cluster +enable_peer :: + Enable a previously-disabled replication relationship +disable_peer :: + Disable a replication relationship. HBase will no longer send edits to that peer cluster, but it still keeps track of all the new WALs that it will need to replicate if and when it is re-enabled. +remove_peer :: + Disable and remove a replication relationship. HBase will no longer send edits to that peer cluster or keep track of WALs. +enable_table_replication :: + Enable the table replication switch for all it's column families. If the table is not found in the destination cluster then it will create one with the same name and column families. +disable_table_replication :: + Disable the table replication switch for all it's column families. -. To verify the validity of replicated data, you can use the included `VerifyReplication` MapReduce job on the source cluster, providing it with the ID of the replication peer and table name to verify. - Other options are possible, such as a time range or specific families to verify. -+ -The command has the following form: -+ ----- -hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication [--starttime=timestamp1] [--stoptime=timestamp [--families=comma separated list of families] ----- -+ -The `VerifyReplication` command prints out `GOODROWS` and `BADROWS` counters to indicate rows that did and did not replicate correctly. +=== Verifying Replicated Data +The `VerifyReplication` MapReduce job, which is included in HBase, performs a systematic comparison of replicated data between two different clusters. Run the VerifyReplication job on the master cluster, supplying it with the peer ID and table name to use for validation. You can limit the verification further by specifying a time range or specific families. The job's short name is `verifyrep`. To run the job, use a command like the following: ++ +[source,bash] +---- +$ HADOOP_CLASSPATH=`${HBASE_HOME}/bin/hbase classpath` "${HADOOP_HOME}/bin/hadoop" jar "${HBASE_HOME}/hbase-server-VERSION.jar" verifyrep --starttime= --stoptime= --families= +---- ++ +The `VerifyReplication` command prints out `GOODROWS` and `BADROWS` counters to indicate rows that did and did not replicate correctly. === Detailed Information About Cluster Replication @@ -1606,6 +1602,13 @@ The following metrics are exposed at the global region server level and (since H | 1 |=== +=== Monitoring Replication Status + +You can use the HBase Shell command `status 'replication'` to monitor the replication status on your cluster. The command has three variations: +* `status 'replication'` -- prints the status of each source and its sinks, sorted by hostname. +* `status 'replication', 'source'` -- prints the status for each replication source, sorted by hostname. +* `status 'replication', 'sink'` -- prints the status for each replication sink, sorted by hostname. 
+ [[ops.backup]] == HBase Backup @@ -1782,7 +1785,7 @@ To copy a snapshot called MySnapshot to an HBase cluster srv2 (hdfs:///srv2:8082 [source,bourne] ---- -$ bin/hbase class org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot MySnapshot -copy-to hdfs://srv2:8082/hbase -mappers 16 +$ bin/hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot MySnapshot -copy-to hdfs://srv2:8082/hbase -mappers 16 ---- .Limiting Bandwidth Consumption @@ -1791,7 +1794,7 @@ The following example limits the above example to 200 MB/sec. [source,bourne] ---- -$ bin/hbase class org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot MySnapshot -copy-to hdfs://srv2:8082/hbase -mappers 16 -bandwidth 200 +$ bin/hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot MySnapshot -copy-to hdfs://srv2:8082/hbase -mappers 16 -bandwidth 200 ---- [[ops.capacity]] diff --git a/src/main/asciidoc/_chapters/performance.adoc b/src/main/asciidoc/_chapters/performance.adoc index 48b647ce563..2155d52b6c2 100644 --- a/src/main/asciidoc/_chapters/performance.adoc +++ b/src/main/asciidoc/_chapters/performance.adoc @@ -100,6 +100,17 @@ Using 10Gbe links between racks will greatly increase performance, and assuming Are all the network interfaces functioning correctly? Are you sure? See the Troubleshooting Case Study in <>. +[[perf.network.call_me_maybe]] +=== Network Consistency and Partition Tolerance +The link:http://en.wikipedia.org/wiki/CAP_theorem[CAP Theorem] states that a distributed system can maintain two out of the following three charateristics: +- *C*onsistency -- all nodes see the same data. +- *A*vailability -- every request receives a response about whether it succeeded or failed. +- *P*artition tolerance -- the system continues to operate even if some of its components become unavailable to the others. + +HBase favors consistency and partition tolerance, where a decision has to be made. Coda Hale explains why partition tolerance is so important, in http://codahale.com/you-cant-sacrifice-partition-tolerance/. + +Robert Yokota used an automated testing framework called link:https://aphyr.com/tags/jepsen[Jepson] to test HBase's partition tolerance in the face of network partitions, using techniques modeled after Aphyr's link:https://aphyr.com/posts/281-call-me-maybe-carly-rae-jepsen-and-the-perils-of-network-partitions[Call Me Maybe] series. The results, available as a link:http://eng.yammer.com/call-me-maybe-hbase/[blog post] and an link:http://eng.yammer.com/call-me-maybe-hbase-addendum/[addendum], show that HBase performs correctly. + [[jvm]] == Java diff --git a/src/main/asciidoc/_chapters/preface.adoc b/src/main/asciidoc/_chapters/preface.adoc index 2eb84114025..960fcc4a5c8 100644 --- a/src/main/asciidoc/_chapters/preface.adoc +++ b/src/main/asciidoc/_chapters/preface.adoc @@ -55,5 +55,10 @@ That said, you are welcome. + It's a fun place to be. + Yours, the HBase Community. +.Reporting Bugs + +Please use link:https://issues.apache.org/jira/browse/hbase[JIRA] to report non-security-related bugs. + +To protect existing HBase installations from new vulnerabilities, please *do not* use JIRA to report security-related bugs. Instead, send your report to the mailing list private@apache.org, which allows anyone to send messages, but restricts who can read them. Someone on that list will contact you to follow up on your report. 
:numbered: diff --git a/src/main/asciidoc/_chapters/rpc.adoc b/src/main/asciidoc/_chapters/rpc.adoc index 5d8b2306d5a..43e71568cd1 100644 --- a/src/main/asciidoc/_chapters/rpc.adoc +++ b/src/main/asciidoc/_chapters/rpc.adoc @@ -28,7 +28,7 @@ :icons: font :experimental: -In 0.95, all client/server communication is done with link:https://code.google.com/p/protobuf/[protobuf'ed] Messages rather than with link:http://hadoop.apache.org/docs/current/api/org/apache/hadoop/io/Writable.html[Hadoop +In 0.95, all client/server communication is done with link:https://developers.google.com/protocol-buffers/[protobuf'ed] Messages rather than with link:http://hadoop.apache.org/docs/current/api/org/apache/hadoop/io/Writable.html[Hadoop Writables]. Our RPC wire format therefore changes. This document describes the client/server request/response protocol and our new RPC wire-format. diff --git a/src/main/asciidoc/_chapters/security.adoc b/src/main/asciidoc/_chapters/security.adoc index 9cffbdb605e..25153a5f2b8 100644 --- a/src/main/asciidoc/_chapters/security.adoc +++ b/src/main/asciidoc/_chapters/security.adoc @@ -27,6 +27,16 @@ :icons: font :experimental: +[IMPORTANT] +.Reporting Security Bugs +==== +NOTE: To protect existing HBase installations from exploitation, please *do not* use JIRA to report security-related bugs. Instead, send your report to the mailing list private@apache.org, which allows anyone to send messages, but restricts who can read them. Someone on that list will contact you to follow up on your report. + +HBase adheres to the Apache Software Foundation's policy on reported vulnerabilities, available at http://apache.org/security/. + +If you wish to send an encrypted report, you can use the GPG details provided for the general ASF security list. This will likely increase the response time to your report. +==== + HBase provides mechanisms to secure various components and aspects of HBase and how it relates to the rest of the Hadoop infrastructure, as well as clients and resources outside Hadoop. == Using Secure HTTP (HTTPS) for the Web UI @@ -270,8 +280,6 @@ Add the following to the `hbase-site.xml` file for every REST gateway: Substitute the appropriate credential and keytab for _$USER_ and _$KEYTAB_ respectively. The REST gateway will authenticate with HBase using the supplied credential. -No authentication will be performed by the REST gateway itself. -All client access via the REST gateway will use the REST gateway's credential and have its privilege. In order to use the REST API principal to interact with HBase, it is also necessary to add the `hbase.rest.kerberos.principal` to the `_acl_` table. For example, to give the REST API principal, `rest_server`, administrative access, a command such as this one will suffice: @@ -283,8 +291,30 @@ grant 'rest_server', 'RWCA' For more information about ACLs, please see the <> section -It should be possible for clients to authenticate with the HBase cluster through the REST gateway in a pass-through manner via SPNEGO HTTP authentication. -This is future work. +HBase REST gateway supports link:http://hadoop.apache.org/docs/stable/hadoop-auth/index.html[SPNEGO HTTP authentication] for client access to the gateway. +To enable REST gateway Kerberos authentication for client access, add the following to the `hbase-site.xml` file for every REST gateway. 
+ +[source,xml] +---- + + hbase.rest.authentication.type + kerberos + + + hbase.rest.authentication.kerberos.principal + HTTP/_HOST@HADOOP.LOCALDOMAIN + + + hbase.rest.authentication.kerberos.keytab + $KEYTAB + +---- + +Substitute the keytab for HTTP for _$KEYTAB_. + +HBase REST gateway supports different 'hbase.rest.authentication.type': simple, kerberos. +You can also implement a custom authentication by implemening Hadoop AuthenticationHandler, then specify the full class name as 'hbase.rest.authentication.type' value. +For more information, refer to link:http://hadoop.apache.org/docs/stable/hadoop-auth/index.html[SPNEGO HTTP authentication]. [[security.rest.gateway]] === REST Gateway Impersonation Configuration @@ -1027,6 +1057,9 @@ The default plugin passes through labels specified in Authorizations added to th When the client passes labels for which the user is not authenticated, the default plugin drops them. You can pass a subset of user authenticated labels via the `Get#setAuthorizations(Authorizations(String,...))` and `Scan#setAuthorizations(Authorizations(String,...));` methods. +Groups can be granted visibility labels the same way as users. Groups are prefixed with an @ symbol. When checking visibility labels of a user, the server will include the visibility labels of the groups of which the user is a member, together with the user's own labels. +When the visibility labels are retrieved using API `VisibilityClient#getAuths` or Shell command `get_auths` for a user, we will return labels added specifically for that user alone, not the group level labels. + Visibility label access checking is performed by the VisibilityController coprocessor. You can use interface `VisibilityLabelService` to provide a custom implementation and/or control the way that visibility labels are stored with cells. See the source file _hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithCustomVisLabService.java_ for one example. @@ -1141,12 +1174,16 @@ hbase> set_auths 'service', [ 'service' ] ---- ---- -gbase> set_auths 'testuser', [ 'test' ] +hbase> set_auths 'testuser', [ 'test' ] ---- ---- hbase> set_auths 'qa', [ 'test', 'developer' ] ---- + +---- +hbase> set_auths '@qagroup', [ 'test' ] +---- ==== + .Java API @@ -1183,6 +1220,10 @@ hbase> clear_auths 'testuser', [ 'test' ] ---- hbase> clear_auths 'qa', [ 'test', 'developer' ] ---- + +---- +hbase> clear_auths '@qagroup', [ 'test', 'developer' ] +---- ==== + .Java API @@ -1253,13 +1294,59 @@ static Table createTableAndWriteDataWithLabels(TableName tableName, String... la ---- ==== +<> +==== Reading Cells with Labels +When you issue a Scan or Get, HBase uses your default set of authorizations to filter out cells that you do not have access to. A superuser can set the default set of authorizations for a given user by using the `set_auths` HBase Shell command or the link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/security/visibility/VisibilityClient.html#setAuths(org.apache.hadoop.conf.Configuration,%20java.lang.String\[\],%20java.lang.String)[VisibilityClient.setAuths()] method. + +You can specify a different authorization during the Scan or Get, by passing the AUTHORIZATIONS option in HBase Shell, or the link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Scan.html#setAuthorizations%28org.apache.hadoop.hbase.security.visibility.Authorizations%29[setAuthorizations()] method if you use the API. This authorization will be combined with your default set as an additional filter. 
It will further filter your results, rather than giving you additional authorization. + +.HBase Shell +==== +---- +hbase> get_auths 'myUser' +hbase> scan 'table1', AUTHORIZATIONS => ['private'] +---- +==== + +.Java API +==== +[source,java] +---- +... +public Void run() throws Exception { + String[] auths1 = { SECRET, CONFIDENTIAL }; + GetAuthsResponse authsResponse = null; + try { + VisibilityClient.setAuths(conf, auths1, user); + try { + authsResponse = VisibilityClient.getAuths(conf, user); + } catch (Throwable e) { + fail("Should not have failed"); + } + } catch (Throwable e) { + } + List authsList = new ArrayList(); + for (ByteString authBS : authsResponse.getAuthList()) { + authsList.add(Bytes.toString(authBS.toByteArray())); + } + assertEquals(2, authsList.size()); + assertTrue(authsList.contains(SECRET)); + assertTrue(authsList.contains(CONFIDENTIAL)); + return null; +} +... +---- +==== + + ==== Implementing Your Own Visibility Label Algorithm Interpreting the labels authenticated for a given get/scan request is a pluggable algorithm. -You can specify a custom plugin by using the property `hbase.regionserver.scan.visibility.label.generator.class`. -The default implementation class is `org.apache.hadoop.hbase.security.visibility.DefaultScanLabelGenerator`. -You can also configure a set of `ScanLabelGenerators` to be used by the system, as a comma-separated list. + +You can specify a custom plugin or plugins by using the property `hbase.regionserver.scan.visibility.label.generator.class`. The output for the first `ScanLabelGenerator` will be the input for the next one, until the end of the list. + +The default implementation, which was implemented in link:https://issues.apache.org/jira/browse/HBASE-12466[HBASE-12466], loads two plugins, `FeedUserAuthScanLabelGenerator` and `DefinedSetFilterScanLabelGenerator`. See <>. 
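Whichever generator chain is configured, it operates on the labels that arrive with each request. As a Java companion to the shell example above (`scan 'table1', AUTHORIZATIONS => ['private']`), the following minimal sketch passes explicit authorizations on a Scan; the table name and label are placeholders.

[source,java]
----
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanWithAuthorizations {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("table1"))) {
      // Pass an explicit label set with the request; the configured
      // ScanLabelGenerator chain decides which of these labels are honored.
      Scan scan = new Scan();
      scan.setAuthorizations(new Authorizations("private"));
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(Bytes.toString(result.getRow()));
        }
      }
    }
  }
}
----

`Get#setAuthorizations(Authorizations)` accepts the same argument for single-row reads.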
==== Replicating Visibility Tags as Strings diff --git a/src/main/asciidoc/asciidoctor.css b/src/main/asciidoc/asciidoctor.css new file mode 100644 index 00000000000..c75f7b0106c --- /dev/null +++ b/src/main/asciidoc/asciidoctor.css @@ -0,0 +1,399 @@ +/* Asciidoctor default stylesheet | MIT License | http://asciidoctor.org */ +/* Remove the comments around the @import statement below when using this as a custom stylesheet */ +/*@import "https://fonts.googleapis.com/css?family=Open+Sans:300,300italic,400,400italic,600,600italic%7CNoto+Serif:400,400italic,700,700italic%7CDroid+Sans+Mono:400";*/ +article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block} +audio,canvas,video{display:inline-block} +audio:not([controls]){display:none;height:0} +[hidden],template{display:none} +script{display:none!important} +html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%} +body{margin:0} +a{background:transparent} +a:focus{outline:thin dotted} +a:active,a:hover{outline:0} +h1{font-size:2em;margin:.67em 0} +abbr[title]{border-bottom:1px dotted} +b,strong{font-weight:bold} +dfn{font-style:italic} +hr{-moz-box-sizing:content-box;box-sizing:content-box;height:0} +mark{background:#ff0;color:#000} +code,kbd,pre,samp{font-family:monospace;font-size:1em} +pre{white-space:pre-wrap} +q{quotes:"\201C" "\201D" "\2018" "\2019"} +small{font-size:80%} +sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline} +sup{top:-.5em} +sub{bottom:-.25em} +img{border:0} +svg:not(:root){overflow:hidden} +figure{margin:0} +fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em} +legend{border:0;padding:0} +button,input,select,textarea{font-family:inherit;font-size:100%;margin:0} +button,input{line-height:normal} +button,select{text-transform:none} +button,html input[type="button"],input[type="reset"],input[type="submit"]{-webkit-appearance:button;cursor:pointer} +button[disabled],html input[disabled]{cursor:default} +input[type="checkbox"],input[type="radio"]{box-sizing:border-box;padding:0} +input[type="search"]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box} +input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none} +button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0} +textarea{overflow:auto;vertical-align:top} +table{border-collapse:collapse;border-spacing:0} +*,*:before,*:after{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box} +html,body{font-size:100%} +body{background:#fff;color:rgba(0,0,0,.8);padding:0;margin:0;font-family:"Noto Serif","DejaVu Serif",serif;font-weight:400;font-style:normal;line-height:1;position:relative;cursor:auto} +a:hover{cursor:pointer} +img,object,embed{max-width:100%;height:auto} +object,embed{height:100%} +img{-ms-interpolation-mode:bicubic} +#map_canvas img,#map_canvas embed,#map_canvas object,.map_canvas img,.map_canvas embed,.map_canvas object{max-width:none!important} +.left{float:left!important} +.right{float:right!important} +.text-left{text-align:left!important} +.text-right{text-align:right!important} +.text-center{text-align:center!important} +.text-justify{text-align:justify!important} +.hide{display:none} +.antialiased,body{-webkit-font-smoothing:antialiased} +img{display:inline-block;vertical-align:middle} +textarea{height:auto;min-height:50px} +select{width:100%} 
+p.lead,.paragraph.lead>p,#preamble>.sectionbody>.paragraph:first-of-type p{font-size:1.21875em;line-height:1.6} +.subheader,.admonitionblock td.content>.title,.audioblock>.title,.exampleblock>.title,.imageblock>.title,.listingblock>.title,.literalblock>.title,.stemblock>.title,.openblock>.title,.paragraph>.title,.quoteblock>.title,table.tableblock>.title,.verseblock>.title,.videoblock>.title,.dlist>.title,.olist>.title,.ulist>.title,.qlist>.title,.hdlist>.title{line-height:1.45;color:#7a2518;font-weight:400;margin-top:0;margin-bottom:.25em} +div,dl,dt,dd,ul,ol,li,h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6,pre,form,p,blockquote,th,td{margin:0;padding:0;direction:ltr} +a{color:#2156a5;text-decoration:underline;line-height:inherit} +a:hover,a:focus{color:#1d4b8f} +a img{border:none} +p{font-family:inherit;font-weight:400;font-size:1em;line-height:1.6;margin-bottom:1.25em;text-rendering:optimizeLegibility} +p aside{font-size:.875em;line-height:1.35;font-style:italic} +h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{font-family:"Open Sans","DejaVu Sans",sans-serif;font-weight:300;font-style:normal;color:#ba3925;text-rendering:optimizeLegibility;margin-top:1em;margin-bottom:.5em;line-height:1.0125em} +h1 small,h2 small,h3 small,#toctitle small,.sidebarblock>.content>.title small,h4 small,h5 small,h6 small{font-size:60%;color:#e99b8f;line-height:0} +h1{font-size:2.125em} +h2{font-size:1.6875em} +h3,#toctitle,.sidebarblock>.content>.title{font-size:1.375em} +h4,h5{font-size:1.125em} +h6{font-size:1em} +hr{border:solid #ddddd8;border-width:1px 0 0;clear:both;margin:1.25em 0 1.1875em;height:0} +em,i{font-style:italic;line-height:inherit} +strong,b{font-weight:bold;line-height:inherit} +small{font-size:60%;line-height:inherit} +code{font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;font-weight:400;color:rgba(0,0,0,.9)} +ul,ol,dl{font-size:1em;line-height:1.6;margin-bottom:1.25em;list-style-position:outside;font-family:inherit} +ul,ol,ul.no-bullet,ol.no-bullet{margin-left:1.5em} +ul li ul,ul li ol{margin-left:1.25em;margin-bottom:0;font-size:1em} +ul.square li ul,ul.circle li ul,ul.disc li ul{list-style:inherit} +ul.square{list-style-type:square} +ul.circle{list-style-type:circle} +ul.disc{list-style-type:disc} +ul.no-bullet{list-style:none} +ol li ul,ol li ol{margin-left:1.25em;margin-bottom:0} +dl dt{margin-bottom:.3125em;font-weight:bold} +dl dd{margin-bottom:1.25em} +abbr,acronym{text-transform:uppercase;font-size:90%;color:rgba(0,0,0,.8);border-bottom:1px dotted #ddd;cursor:help} +abbr{text-transform:none} +blockquote{margin:0 0 1.25em;padding:.5625em 1.25em 0 1.1875em;border-left:1px solid #ddd} +blockquote cite{display:block;font-size:.9375em;color:rgba(0,0,0,.6)} +blockquote cite:before{content:"\2014 \0020"} +blockquote cite a,blockquote cite a:visited{color:rgba(0,0,0,.6)} +blockquote,blockquote p{line-height:1.6;color:rgba(0,0,0,.85)} +@media only screen and (min-width:768px){h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{line-height:1.2} +h1{font-size:2.75em} +h2{font-size:2.3125em} +h3,#toctitle,.sidebarblock>.content>.title{font-size:1.6875em} +h4{font-size:1.4375em}}table{background:#fff;margin-bottom:1.25em;border:solid 1px #dedede} +table thead,table tfoot{background:#f7f8f7;font-weight:bold} +table thead tr th,table thead tr td,table tfoot tr th,table tfoot tr td{padding:.5em .625em .625em;font-size:inherit;color:rgba(0,0,0,.8);text-align:left} +table tr th,table tr td{padding:.5625em .625em;font-size:inherit;color:rgba(0,0,0,.8)} +table 
tr.even,table tr.alt,table tr:nth-of-type(even){background:#f8f8f7} +table thead tr th,table tfoot tr th,table tbody tr td,table tr td,table tfoot tr td{display:table-cell;line-height:1.6} +h1,h2,h3,#toctitle,.sidebarblock>.content>.title,h4,h5,h6{line-height:1.2;word-spacing:-.05em} +h1 strong,h2 strong,h3 strong,#toctitle strong,.sidebarblock>.content>.title strong,h4 strong,h5 strong,h6 strong{font-weight:400} +.clearfix:before,.clearfix:after,.float-group:before,.float-group:after{content:" ";display:table} +.clearfix:after,.float-group:after{clear:both} +*:not(pre)>code{font-size:.9375em;font-style:normal!important;letter-spacing:0;padding:.1em .5ex;word-spacing:-.15em;background-color:#f7f7f8;-webkit-border-radius:4px;border-radius:4px;line-height:1.45;text-rendering:optimizeSpeed} +pre,pre>code{line-height:1.45;color:rgba(0,0,0,.9);font-family:"Droid Sans Mono","DejaVu Sans Mono",monospace;font-weight:400;text-rendering:optimizeSpeed} +.keyseq{color:rgba(51,51,51,.8)} +kbd{display:inline-block;color:rgba(0,0,0,.8);font-size:.75em;line-height:1.4;background-color:#f7f7f7;border:1px solid #ccc;-webkit-border-radius:3px;border-radius:3px;-webkit-box-shadow:0 1px 0 rgba(0,0,0,.2),0 0 0 .1em white inset;box-shadow:0 1px 0 rgba(0,0,0,.2),0 0 0 .1em #fff inset;margin:-.15em .15em 0 .15em;padding:.2em .6em .2em .5em;vertical-align:middle;white-space:nowrap} +.keyseq kbd:first-child{margin-left:0} +.keyseq kbd:last-child{margin-right:0} +.menuseq,.menu{color:rgba(0,0,0,.8)} +b.button:before,b.button:after{position:relative;top:-1px;font-weight:400} +b.button:before{content:"[";padding:0 3px 0 2px} +b.button:after{content:"]";padding:0 2px 0 3px} +p a>code:hover{color:rgba(0,0,0,.9)} +#header,#content,#footnotes,#footer{width:100%;margin-left:auto;margin-right:auto;margin-top:0;margin-bottom:0;max-width:62.5em;*zoom:1;position:relative;padding-left:.9375em;padding-right:.9375em} +#header:before,#header:after,#content:before,#content:after,#footnotes:before,#footnotes:after,#footer:before,#footer:after{content:" ";display:table} +#header:after,#content:after,#footnotes:after,#footer:after{clear:both} +#content{margin-top:1.25em} +#content:before{content:none} +#header>h1:first-child{color:rgba(0,0,0,.85);margin-top:2.25rem;margin-bottom:0} +#header>h1:first-child+#toc{margin-top:8px;border-top:1px solid #ddddd8} +#header>h1:only-child,body.toc2 #header>h1:nth-last-child(2){border-bottom:1px solid #ddddd8;padding-bottom:8px} +#header .details{border-bottom:1px solid #ddddd8;line-height:1.45;padding-top:.25em;padding-bottom:.25em;padding-left:.25em;color:rgba(0,0,0,.6);display:-ms-flexbox;display:-webkit-flex;display:flex;-ms-flex-flow:row wrap;-webkit-flex-flow:row wrap;flex-flow:row wrap} +#header .details span:first-child{margin-left:-.125em} +#header .details span.email a{color:rgba(0,0,0,.85)} +#header .details br{display:none} +#header .details br+span:before{content:"\00a0\2013\00a0"} +#header .details br+span.author:before{content:"\00a0\22c5\00a0";color:rgba(0,0,0,.85)} +#header .details br+span#revremark:before{content:"\00a0|\00a0"} +#header #revnumber{text-transform:capitalize} +#header #revnumber:after{content:"\00a0"} +#content>h1:first-child:not([class]){color:rgba(0,0,0,.85);border-bottom:1px solid #ddddd8;padding-bottom:8px;margin-top:0;padding-top:1rem;margin-bottom:1.25rem} +#toc{border-bottom:1px solid #efefed;padding-bottom:.5em} +#toc>ul{margin-left:.125em} +#toc ul.sectlevel0>li>a{font-style:italic} +#toc ul.sectlevel0 ul.sectlevel1{margin:.5em 0} +#toc 
ul{font-family:"Open Sans","DejaVu Sans",sans-serif;list-style-type:none} +#toc a{text-decoration:none} +#toc a:active{text-decoration:underline} +#toctitle{color:#7a2518;font-size:1.2em} +@media only screen and (min-width:768px){#toctitle{font-size:1.375em} +body.toc2{padding-left:15em;padding-right:0} +#toc.toc2{margin-top:0!important;background-color:#f8f8f7;position:fixed;width:15em;left:0;top:0;border-right:1px solid #efefed;border-top-width:0!important;border-bottom-width:0!important;z-index:1000;padding:1.25em 1em;height:100%;overflow:auto} +#toc.toc2 #toctitle{margin-top:0;font-size:1.2em} +#toc.toc2>ul{font-size:.9em;margin-bottom:0} +#toc.toc2 ul ul{margin-left:0;padding-left:1em} +#toc.toc2 ul.sectlevel0 ul.sectlevel1{padding-left:0;margin-top:.5em;margin-bottom:.5em} +body.toc2.toc-right{padding-left:0;padding-right:15em} +body.toc2.toc-right #toc.toc2{border-right-width:0;border-left:1px solid #efefed;left:auto;right:0}}@media only screen and (min-width:1280px){body.toc2{padding-left:20em;padding-right:0} +#toc.toc2{width:20em} +#toc.toc2 #toctitle{font-size:1.375em} +#toc.toc2>ul{font-size:.95em} +#toc.toc2 ul ul{padding-left:1.25em} +body.toc2.toc-right{padding-left:0;padding-right:20em}}#content #toc{border-style:solid;border-width:1px;border-color:#e0e0dc;margin-bottom:1.25em;padding:1.25em;background:#f8f8f7;-webkit-border-radius:4px;border-radius:4px} +#content #toc>:first-child{margin-top:0} +#content #toc>:last-child{margin-bottom:0} +#footer{max-width:100%;background-color:rgba(0,0,0,.8);padding:1.25em} +#footer-text{color:rgba(255,255,255,.8);line-height:1.44} +.sect1{padding-bottom:.625em} +@media only screen and (min-width:768px){.sect1{padding-bottom:1.25em}}.sect1+.sect1{border-top:1px solid #efefed} +#content h1>a.anchor,h2>a.anchor,h3>a.anchor,#toctitle>a.anchor,.sidebarblock>.content>.title>a.anchor,h4>a.anchor,h5>a.anchor,h6>a.anchor{position:absolute;z-index:1001;width:1.5ex;margin-left:-1.5ex;display:block;text-decoration:none!important;visibility:hidden;text-align:center;font-weight:400} +#content h1>a.anchor:before,h2>a.anchor:before,h3>a.anchor:before,#toctitle>a.anchor:before,.sidebarblock>.content>.title>a.anchor:before,h4>a.anchor:before,h5>a.anchor:before,h6>a.anchor:before{content:"\00A7";font-size:.85em;display:block;padding-top:.1em} +#content h1:hover>a.anchor,#content h1>a.anchor:hover,h2:hover>a.anchor,h2>a.anchor:hover,h3:hover>a.anchor,#toctitle:hover>a.anchor,.sidebarblock>.content>.title:hover>a.anchor,h3>a.anchor:hover,#toctitle>a.anchor:hover,.sidebarblock>.content>.title>a.anchor:hover,h4:hover>a.anchor,h4>a.anchor:hover,h5:hover>a.anchor,h5>a.anchor:hover,h6:hover>a.anchor,h6>a.anchor:hover{visibility:visible} +#content h1>a.link,h2>a.link,h3>a.link,#toctitle>a.link,.sidebarblock>.content>.title>a.link,h4>a.link,h5>a.link,h6>a.link{color:#ba3925;text-decoration:none} +#content h1>a.link:hover,h2>a.link:hover,h3>a.link:hover,#toctitle>a.link:hover,.sidebarblock>.content>.title>a.link:hover,h4>a.link:hover,h5>a.link:hover,h6>a.link:hover{color:#a53221} +.audioblock,.imageblock,.literalblock,.listingblock,.stemblock,.videoblock{margin-bottom:1.25em} +.admonitionblock 
td.content>.title,.audioblock>.title,.exampleblock>.title,.imageblock>.title,.listingblock>.title,.literalblock>.title,.stemblock>.title,.openblock>.title,.paragraph>.title,.quoteblock>.title,table.tableblock>.title,.verseblock>.title,.videoblock>.title,.dlist>.title,.olist>.title,.ulist>.title,.qlist>.title,.hdlist>.title{text-rendering:optimizeLegibility;text-align:left;font-family:"Noto Serif","DejaVu Serif",serif;font-size:1rem;font-style:italic} +table.tableblock>caption.title{white-space:nowrap;overflow:visible;max-width:0} +.paragraph.lead>p,#preamble>.sectionbody>.paragraph:first-of-type p{color:rgba(0,0,0,.85)} +table.tableblock #preamble>.sectionbody>.paragraph:first-of-type p{font-size:inherit} +.admonitionblock>table{border-collapse:separate;border:0;background:none;width:100%} +.admonitionblock>table td.icon{text-align:center;width:80px} +.admonitionblock>table td.icon img{max-width:none} +.admonitionblock>table td.icon .title{font-weight:bold;font-family:"Open Sans","DejaVu Sans",sans-serif;text-transform:uppercase} +.admonitionblock>table td.content{padding-left:1.125em;padding-right:1.25em;border-left:1px solid #ddddd8;color:rgba(0,0,0,.6)} +.admonitionblock>table td.content>:last-child>:last-child{margin-bottom:0} +.exampleblock>.content{border-style:solid;border-width:1px;border-color:#e6e6e6;margin-bottom:1.25em;padding:1.25em;background:#fff;-webkit-border-radius:4px;border-radius:4px} +.exampleblock>.content>:first-child{margin-top:0} +.exampleblock>.content>:last-child{margin-bottom:0} +.sidebarblock{border-style:solid;border-width:1px;border-color:#e0e0dc;margin-bottom:1.25em;padding:1.25em;background:#f8f8f7;-webkit-border-radius:4px;border-radius:4px} +.sidebarblock>:first-child{margin-top:0} +.sidebarblock>:last-child{margin-bottom:0} +.sidebarblock>.content>.title{color:#7a2518;margin-top:0;text-align:center} +.exampleblock>.content>:last-child>:last-child,.exampleblock>.content .olist>ol>li:last-child>:last-child,.exampleblock>.content .ulist>ul>li:last-child>:last-child,.exampleblock>.content .qlist>ol>li:last-child>:last-child,.sidebarblock>.content>:last-child>:last-child,.sidebarblock>.content .olist>ol>li:last-child>:last-child,.sidebarblock>.content .ulist>ul>li:last-child>:last-child,.sidebarblock>.content .qlist>ol>li:last-child>:last-child{margin-bottom:0} +.literalblock pre,.listingblock pre:not(.highlight),.listingblock pre[class="highlight"],.listingblock pre[class^="highlight "],.listingblock pre.CodeRay,.listingblock pre.prettyprint{background:#f7f7f8} +.sidebarblock .literalblock pre,.sidebarblock .listingblock pre:not(.highlight),.sidebarblock .listingblock pre[class="highlight"],.sidebarblock .listingblock pre[class^="highlight "],.sidebarblock .listingblock pre.CodeRay,.sidebarblock .listingblock pre.prettyprint{background:#f2f1f1} +.literalblock pre,.literalblock pre[class],.listingblock pre,.listingblock pre[class]{-webkit-border-radius:4px;border-radius:4px;word-wrap:break-word;padding:1em;font-size:.8125em} +.literalblock pre.nowrap,.literalblock pre[class].nowrap,.listingblock pre.nowrap,.listingblock pre[class].nowrap{overflow-x:auto;white-space:pre;word-wrap:normal} +@media only screen and (min-width:768px){.literalblock pre,.literalblock pre[class],.listingblock pre,.listingblock pre[class]{font-size:.90625em}}@media only screen and (min-width:1280px){.literalblock pre,.literalblock pre[class],.listingblock pre,.listingblock pre[class]{font-size:1em}}.literalblock.output pre{color:#f7f7f8;background-color:rgba(0,0,0,.9)} +.listingblock 
pre.highlightjs{padding:0} +.listingblock pre.highlightjs>code{padding:1em;-webkit-border-radius:4px;border-radius:4px} +.listingblock pre.prettyprint{border-width:0} +.listingblock>.content{position:relative} +.listingblock code[data-lang]:before{display:none;content:attr(data-lang);position:absolute;font-size:.75em;top:.425rem;right:.5rem;line-height:1;text-transform:uppercase;color:#999} +.listingblock:hover code[data-lang]:before{display:block} +.listingblock.terminal pre .command:before{content:attr(data-prompt);padding-right:.5em;color:#999} +.listingblock.terminal pre .command:not([data-prompt]):before{content:"$"} +table.pyhltable{border-collapse:separate;border:0;margin-bottom:0;background:none} +table.pyhltable td{vertical-align:top;padding-top:0;padding-bottom:0} +table.pyhltable td.code{padding-left:.75em;padding-right:0} +pre.pygments .lineno,table.pyhltable td:not(.code){color:#999;padding-left:0;padding-right:.5em;border-right:1px solid #ddddd8} +pre.pygments .lineno{display:inline-block;margin-right:.25em} +table.pyhltable .linenodiv{background:none!important;padding-right:0!important} +.quoteblock{margin:0 1em 1.25em 1.5em;display:table} +.quoteblock>.title{margin-left:-1.5em;margin-bottom:.75em} +.quoteblock blockquote,.quoteblock blockquote p{color:rgba(0,0,0,.85);font-size:1.15rem;line-height:1.75;word-spacing:.1em;letter-spacing:0;font-style:italic;text-align:justify} +.quoteblock blockquote{margin:0;padding:0;border:0} +.quoteblock blockquote:before{content:"\201c";float:left;font-size:2.75em;font-weight:bold;line-height:.6em;margin-left:-.6em;color:#7a2518;text-shadow:0 1px 2px rgba(0,0,0,.1)} +.quoteblock blockquote>.paragraph:last-child p{margin-bottom:0} +.quoteblock .attribution{margin-top:.5em;margin-right:.5ex;text-align:right} +.quoteblock .quoteblock{margin-left:0;margin-right:0;padding:.5em 0;border-left:3px solid rgba(0,0,0,.6)} +.quoteblock .quoteblock blockquote{padding:0 0 0 .75em} +.quoteblock .quoteblock blockquote:before{display:none} +.verseblock{margin:0 1em 1.25em 1em} +.verseblock pre{font-family:"Open Sans","DejaVu Sans",sans;font-size:1.15rem;color:rgba(0,0,0,.85);font-weight:300;text-rendering:optimizeLegibility} +.verseblock pre strong{font-weight:400} +.verseblock .attribution{margin-top:1.25rem;margin-left:.5ex} +.quoteblock .attribution,.verseblock .attribution{font-size:.9375em;line-height:1.45;font-style:italic} +.quoteblock .attribution br,.verseblock .attribution br{display:none} +.quoteblock .attribution cite,.verseblock .attribution cite{display:block;letter-spacing:-.05em;color:rgba(0,0,0,.6)} +.quoteblock.abstract{margin:0 0 1.25em 0;display:block} +.quoteblock.abstract blockquote,.quoteblock.abstract blockquote p{text-align:left;word-spacing:0} +.quoteblock.abstract blockquote:before,.quoteblock.abstract blockquote p:first-of-type:before{display:none} +table.tableblock{max-width:100%;border-collapse:separate} +table.tableblock td>.paragraph:last-child p>p:last-child,table.tableblock th>p:last-child,table.tableblock td>p:last-child{margin-bottom:0} +table.spread{width:100%} +table.tableblock,th.tableblock,td.tableblock{border:0 solid #dedede} +table.grid-all th.tableblock,table.grid-all td.tableblock{border-width:0 1px 1px 0} +table.grid-all tfoot>tr>th.tableblock,table.grid-all tfoot>tr>td.tableblock{border-width:1px 1px 0 0} +table.grid-cols th.tableblock,table.grid-cols td.tableblock{border-width:0 1px 0 0} +table.grid-all *>tr>.tableblock:last-child,table.grid-cols *>tr>.tableblock:last-child{border-right-width:0} 
+table.grid-rows th.tableblock,table.grid-rows td.tableblock{border-width:0 0 1px 0} +table.grid-all tbody>tr:last-child>th.tableblock,table.grid-all tbody>tr:last-child>td.tableblock,table.grid-all thead:last-child>tr>th.tableblock,table.grid-rows tbody>tr:last-child>th.tableblock,table.grid-rows tbody>tr:last-child>td.tableblock,table.grid-rows thead:last-child>tr>th.tableblock{border-bottom-width:0} +table.grid-rows tfoot>tr>th.tableblock,table.grid-rows tfoot>tr>td.tableblock{border-width:1px 0 0 0} +table.frame-all{border-width:1px} +table.frame-sides{border-width:0 1px} +table.frame-topbot{border-width:1px 0} +th.halign-left,td.halign-left{text-align:left} +th.halign-right,td.halign-right{text-align:right} +th.halign-center,td.halign-center{text-align:center} +th.valign-top,td.valign-top{vertical-align:top} +th.valign-bottom,td.valign-bottom{vertical-align:bottom} +th.valign-middle,td.valign-middle{vertical-align:middle} +table thead th,table tfoot th{font-weight:bold} +tbody tr th{display:table-cell;line-height:1.6;background:#f7f8f7} +tbody tr th,tbody tr th p,tfoot tr th,tfoot tr th p{color:rgba(0,0,0,.8);font-weight:bold} +p.tableblock>code:only-child{background:none;padding:0} +p.tableblock{font-size:1em} +td>div.verse{white-space:pre} +ol{margin-left:1.75em} +ul li ol{margin-left:1.5em} +dl dd{margin-left:1.125em} +dl dd:last-child,dl dd:last-child>:last-child{margin-bottom:0} +ol>li p,ul>li p,ul dd,ol dd,.olist .olist,.ulist .ulist,.ulist .olist,.olist .ulist{margin-bottom:.625em} +ul.unstyled,ol.unnumbered,ul.checklist,ul.none{list-style-type:none} +ul.unstyled,ol.unnumbered,ul.checklist{margin-left:.625em} +ul.checklist li>p:first-child>.fa-square-o:first-child,ul.checklist li>p:first-child>.fa-check-square-o:first-child{width:1em;font-size:.85em} +ul.checklist li>p:first-child>input[type="checkbox"]:first-child{width:1em;position:relative;top:1px} +ul.inline{margin:0 auto .625em auto;margin-left:-1.375em;margin-right:0;padding:0;list-style:none;overflow:hidden} +ul.inline>li{list-style:none;float:left;margin-left:1.375em;display:block} +ul.inline>li>*{display:block} +.unstyled dl dt{font-weight:400;font-style:normal} +ol.arabic{list-style-type:decimal} +ol.decimal{list-style-type:decimal-leading-zero} +ol.loweralpha{list-style-type:lower-alpha} +ol.upperalpha{list-style-type:upper-alpha} +ol.lowerroman{list-style-type:lower-roman} +ol.upperroman{list-style-type:upper-roman} +ol.lowergreek{list-style-type:lower-greek} +.hdlist>table,.colist>table{border:0;background:none} +.hdlist>table>tbody>tr,.colist>table>tbody>tr{background:none} +td.hdlist1{padding-right:.75em;font-weight:bold} +td.hdlist1,td.hdlist2{vertical-align:top} +.literalblock+.colist,.listingblock+.colist{margin-top:-.5em} +.colist>table tr>td:first-of-type{padding:0 .75em;line-height:1} +.colist>table tr>td:last-of-type{padding:.25em 0} +.thumb,.th{line-height:0;display:inline-block;border:solid 4px #fff;-webkit-box-shadow:0 0 0 1px #ddd;box-shadow:0 0 0 1px #ddd} +.imageblock.left,.imageblock[style*="float: left"]{margin:.25em .625em 1.25em 0} +.imageblock.right,.imageblock[style*="float: right"]{margin:.25em 0 1.25em .625em} +.imageblock>.title{margin-bottom:0} +.imageblock.thumb,.imageblock.th{border-width:6px} +.imageblock.thumb>.title,.imageblock.th>.title{padding:0 .125em} +.image.left,.image.right{margin-top:.25em;margin-bottom:.25em;display:inline-block;line-height:0} +.image.left{margin-right:.625em} +.image.right{margin-left:.625em} +a.image{text-decoration:none} 
+span.footnote,span.footnoteref{vertical-align:super;font-size:.875em} +span.footnote a,span.footnoteref a{text-decoration:none} +span.footnote a:active,span.footnoteref a:active{text-decoration:underline} +#footnotes{padding-top:.75em;padding-bottom:.75em;margin-bottom:.625em} +#footnotes hr{width:20%;min-width:6.25em;margin:-.25em 0 .75em 0;border-width:1px 0 0 0} +#footnotes .footnote{padding:0 .375em;line-height:1.3;font-size:.875em;margin-left:1.2em;text-indent:-1.2em;margin-bottom:.2em} +#footnotes .footnote a:first-of-type{font-weight:bold;text-decoration:none} +#footnotes .footnote:last-of-type{margin-bottom:0} +#content #footnotes{margin-top:-.625em;margin-bottom:0;padding:.75em 0} +.gist .file-data>table{border:0;background:#fff;width:100%;margin-bottom:0} +.gist .file-data>table td.line-data{width:99%} +div.unbreakable{page-break-inside:avoid} +.big{font-size:larger} +.small{font-size:smaller} +.underline{text-decoration:underline} +.overline{text-decoration:overline} +.line-through{text-decoration:line-through} +.aqua{color:#00bfbf} +.aqua-background{background-color:#00fafa} +.black{color:#000} +.black-background{background-color:#000} +.blue{color:#0000bf} +.blue-background{background-color:#0000fa} +.fuchsia{color:#bf00bf} +.fuchsia-background{background-color:#fa00fa} +.gray{color:#606060} +.gray-background{background-color:#7d7d7d} +.green{color:#006000} +.green-background{background-color:#007d00} +.lime{color:#00bf00} +.lime-background{background-color:#00fa00} +.maroon{color:#600000} +.maroon-background{background-color:#7d0000} +.navy{color:#000060} +.navy-background{background-color:#00007d} +.olive{color:#606000} +.olive-background{background-color:#7d7d00} +.purple{color:#600060} +.purple-background{background-color:#7d007d} +.red{color:#bf0000} +.red-background{background-color:#fa0000} +.silver{color:#909090} +.silver-background{background-color:#bcbcbc} +.teal{color:#006060} +.teal-background{background-color:#007d7d} +.white{color:#bfbfbf} +.white-background{background-color:#fafafa} +.yellow{color:#bfbf00} +.yellow-background{background-color:#fafa00} +span.icon>.fa{cursor:default} +.admonitionblock td.icon [class^="fa icon-"]{font-size:2.5em;text-shadow:1px 1px 2px rgba(0,0,0,.5);cursor:default} +.admonitionblock td.icon .icon-note:before{content:"\f05a";color:#19407c} +.admonitionblock td.icon .icon-tip:before{content:"\f0eb";text-shadow:1px 1px 2px rgba(155,155,0,.8);color:#111} +.admonitionblock td.icon .icon-warning:before{content:"\f071";color:#bf6900} +.admonitionblock td.icon .icon-caution:before{content:"\f06d";color:#bf3400} +.admonitionblock td.icon .icon-important:before{content:"\f06a";color:#bf0000} +.conum[data-value]{display:inline-block;color:#fff!important;background-color:rgba(0,0,0,.8);-webkit-border-radius:100px;border-radius:100px;text-align:center;font-size:.75em;width:1.67em;height:1.67em;line-height:1.67em;font-family:"Open Sans","DejaVu Sans",sans-serif;font-style:normal;font-weight:bold} +.conum[data-value] *{color:#fff!important} +.conum[data-value]+b{display:none} +.conum[data-value]:after{content:attr(data-value)} +pre .conum[data-value]{position:relative;top:-.125em} +b.conum *{color:inherit!important} +.conum:not([data-value]):empty{display:none} +h1,h2{letter-spacing:-.01em} +dt,th.tableblock,td.content{text-rendering:optimizeLegibility} +p,td.content{letter-spacing:-.01em} +p strong,td.content strong{letter-spacing:-.005em} +p,blockquote,dt,td.content{font-size:1.0625rem} +p{margin-bottom:1.25rem} +.sidebarblock p,.sidebarblock 
dt,.sidebarblock td.content,p.tableblock{font-size:1em} +.exampleblock>.content{background-color:#fffef7;border-color:#e0e0dc;-webkit-box-shadow:0 1px 4px #e0e0dc;box-shadow:0 1px 4px #e0e0dc} +.print-only{display:none!important} +@media print{@page{margin:1.25cm .75cm} +*{-webkit-box-shadow:none!important;box-shadow:none!important;text-shadow:none!important} +a{color:inherit!important;text-decoration:underline!important} +a.bare,a[href^="#"],a[href^="mailto:"]{text-decoration:none!important} +a[href^="http:"]:not(.bare):after,a[href^="https:"]:not(.bare):after{content:"(" attr(href) ")";display:inline-block;font-size:.875em;padding-left:.25em} +abbr[title]:after{content:" (" attr(title) ")"} +pre,blockquote,tr,img{page-break-inside:avoid} +thead{display:table-header-group} +img{max-width:100%!important} +p,blockquote,dt,td.content{font-size:1em;orphans:3;widows:3} +h2,h3,#toctitle,.sidebarblock>.content>.title{page-break-after:avoid} +#toc,.sidebarblock,.exampleblock>.content{background:none!important} +#toc{border-bottom:1px solid #ddddd8!important;padding-bottom:0!important} +.sect1{padding-bottom:0!important} +.sect1+.sect1{border:0!important} +#header>h1:first-child{margin-top:1.25rem} +body.book #header{text-align:center} +body.book #header>h1:first-child{border:0!important;margin:2.5em 0 1em 0} +body.book #header .details{border:0!important;display:block;padding:0!important} +body.book #header .details span:first-child{margin-left:0!important} +body.book #header .details br{display:block} +body.book #header .details br+span:before{content:none!important} +body.book #toc{border:0!important;text-align:left!important;padding:0!important;margin:0!important} +body.book #toc,body.book #preamble,body.book h1.sect0,body.book .sect1>h2{page-break-before:always} +.listingblock code[data-lang]:before{display:block} +#footer{background:none!important;padding:0 .9375em} +#footer-text{color:rgba(0,0,0,.6)!important;font-size:.9em} +.hide-on-print{display:none!important} +.print-only{display:block!important} +.hide-for-print{display:none!important} +.show-for-print{display:inherit!important}} \ No newline at end of file diff --git a/src/main/asciidoc/book.adoc b/src/main/asciidoc/book.adoc index 0ea9a6a04b5..d030c38f234 100644 --- a/src/main/asciidoc/book.adoc +++ b/src/main/asciidoc/book.adoc @@ -19,26 +19,36 @@ */ //// -= Apache HBase (TM) Reference Guide image:hbase_logo.png[] image:jumping-orca_rotated_25percent.png[] += Apache HBase (TM) Reference Guide :Author: Apache HBase Team :Email: :doctype: book +:Version: {docVersion} +:revnumber: {docVersion} +// Logo for PDF -- doesn't render in HTML +:title-logo: hbase_logo_with_orca.png :numbered: :toc: left :toclevels: 1 :toc-title: Contents +:sectanchors: :icons: font :iconsdir: icons :linkcss: :experimental: :source-language: java - - :leveloffset: 0 +// Logo for HTML -- doesn't render in PDF +++++ +

+<img src="images/hbase_logo_with_orca.png" alt="Apache HBase Logo" />
+++++
+
 // The directory is called _chapters because asciidoctor skips direct
 // processing of files found in directories starting with an _. This
-// prevents each chapter being built as its own book.j
+// prevents each chapter being built as its own book.
 include::_chapters/preface.adoc[]
diff --git a/src/main/asciidoc/images b/src/main/asciidoc/images
new file mode 120000
index 00000000000..06d04d0edbb
--- /dev/null
+++ b/src/main/asciidoc/images
@@ -0,0 +1 @@
+../site/resources/images
\ No newline at end of file
diff --git a/src/main/site/resources/images/hbase_logo_with_orca.png b/src/main/site/resources/images/hbase_logo_with_orca.png
new file mode 100644
index 00000000000..7ed60e227c6
Binary files /dev/null and b/src/main/site/resources/images/hbase_logo_with_orca.png differ
diff --git a/src/main/site/resources/images/hbase_logo_with_orca.xcf b/src/main/site/resources/images/hbase_logo_with_orca.xcf
new file mode 100644
index 00000000000..8d88da2ac9f
Binary files /dev/null and b/src/main/site/resources/images/hbase_logo_with_orca.xcf differ
diff --git a/src/main/site/resources/images/jumping-orca_transparent_rotated.xcf b/src/main/site/resources/images/jumping-orca_transparent_rotated.xcf
new file mode 100644
index 00000000000..be9e3d91140
Binary files /dev/null and b/src/main/site/resources/images/jumping-orca_transparent_rotated.xcf differ
diff --git a/src/main/site/resources/images/region_split_process.png b/src/main/site/resources/images/region_split_process.png
new file mode 100644
index 00000000000..27176173c85
Binary files /dev/null and b/src/main/site/resources/images/region_split_process.png differ
diff --git a/src/main/site/site.xml b/src/main/site/site.xml
index 81c931571cc..b7debd35c50 100644
--- a/src/main/site/site.xml
+++ b/src/main/site/site.xml
@@ -64,29 +64,30 @@
- - - + + + + - - - - - - - - - + + + + + + + + + - - - + + + - - - + + +
diff --git a/src/main/site/xdoc/index.xml b/src/main/site/xdoc/index.xml
index a40ab4b4670..d7e1e4edfd5 100644
--- a/src/main/site/xdoc/index.xml
+++ b/src/main/site/xdoc/index.xml
@@ -17,17 +17,21 @@
 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
-      Apache HBase™ Home
+      Apache HBase™ Home
-      Apache HBase™ is the Hadoop database, a distributed, scalable, big data store.
+      Apache HBase™ is the Hadoop database, a distributed, scalable, big data store.
+
+      Download Apache HBase™
+
+      Click here to download Apache HBase™.
+
       When Would I Use Apache HBase?
-      Use Apache HBase when you need random, realtime read/write access to your Big Data.
+      Use Apache HBase™ when you need random, realtime read/write access to your Big Data.
       This project's goal is the hosting of very large tables -- billions of rows X millions of columns --
       atop clusters of commodity hardware. Apache HBase is an open-source, distributed, versioned,
       non-relational database modeled after Google's Bigtable: A Distributed Storage System for
       Structured Data by Chang et al. Just as Bigtable leverages the distributed data storage provided
       by the Google File System, Apache HBase provides Bigtable-like capabilities on top of Hadoop and HDFS.
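As a rough illustration of the "random, realtime read/write access" described above (a sketch only, not part of this change), the snippet below writes and reads a single cell through the HBase client Connection/Table API. It assumes a running cluster whose hbase-site.xml is on the classpath; the table name "mytable", column family "cf", qualifier "greeting", and row key "row-42" are hypothetical, and the table must already exist.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RandomReadWriteSketch {
  public static void main(String[] args) throws Exception {
    // Picks up hbase-site.xml from the classpath; assumes a reachable, running cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         // "mytable" and column family "cf" are hypothetical and must already exist.
         Table table = connection.getTable(TableName.valueOf("mytable"))) {

      // Random write: a single cell addressed by row key, family, and qualifier.
      Put put = new Put(Bytes.toBytes("row-42"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("greeting"), Bytes.toBytes("hello"));
      table.put(put);

      // Random read: fetch the same row back by its key.
      Get get = new Get(Bytes.toBytes("row-42"));
      Result result = table.get(get);
      byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("greeting"));
      System.out.println(value == null ? "(no value)" : Bytes.toString(value));
    }
  }
}

Because both operations are addressed by row key, they touch only the region hosting that key rather than scanning the table, which is the sense in which HBase offers random, realtime access on top of HDFS.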