#!/usr/bin/env bash
#
#/**
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements. See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership. The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
#
# cleans hbase related data from zookeeper and hdfs if no hbase process is alive.
#
# Environment Variables
#
# HBASE_REGIONSERVERS File naming remote hosts.
# Default is ${HADOOP_CONF_DIR}/regionservers
# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
# HBASE_CONF_DIR Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
# HBASE_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
# HBASE_SLAVE_TIMEOUT Seconds to wait for timing out a remote command.
# HBASE_SSH_OPTS Options passed to ssh when running remote commands.
#
2015-03-12 20:06:27 -04:00
|
|
|
usage="Usage: hbase-cleanup.sh (--cleanZk|--cleanHdfs|--cleanAll|--cleanAcls)"

# Resolve the directory this script lives in so sibling scripts can be found.
bin=$(dirname "$0")
bin=$(cd "$bin">/dev/null; pwd)

# This will set HBASE_HOME, etc.
. "$bin"/hbase-config.sh

# Accept exactly one of the supported cleanup options; anything else
# leaves $matches unset and triggers the usage message below.
case "$1" in
  --cleanZk|--cleanHdfs|--cleanAll|--cleanAcls)
    matches="yes" ;;
  *) ;;
esac

if [ $# -ne 1 ] || [ "$matches" = "" ]; then
  echo "$usage"
  exit 1;
fi

format_option=$1;

# ZooKeeper parent znode for this cluster; HBaseConfTool prints the
# literal string "null" when the key is unset, so fall back to "/hbase".
zparent=$("$bin"/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.parent)
if [ "$zparent" == "null" ]; then zparent="/hbase"; fi

# HBase root directory in hdfs; same "null" convention as above.
hrootdir=$("$bin"/hbase org.apache.hadoop.hbase.util.HBaseConfTool hbase.rootdir)
if [ "$hrootdir" == "null" ]; then hrootdir="file:///tmp/hbase-${USER}/hbase"; fi
|
|
|
|
|
|
|
|
# Verifies that no child znodes (live region servers or backup masters)
# remain under the hbase parent znode; exits the whole script with
# status 1 if any are still present, because cleaning while servers are
# registered would destroy live cluster data.
#   $1 - which child list to check: "regionservers" or "backupmasters"
# Reads globals: bin, zparent.
check_for_znodes() {
  command=$1;
  case $command in
    regionservers)
      zchild=$("$bin"/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.rs)
      if [ "$zchild" == "null" ]; then zchild="rs"; fi
      ;;
    backupmasters)
      zchild=$("$bin"/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.backup.masters)
      if [ "$zchild" == "null" ]; then zchild="backup-masters"; fi
      ;;
  esac
  # zkcli prints the child list as the last output line, wrapped in
  # [brackets]; strip the brackets so an empty list becomes "".
  znodes=$("$bin"/hbase zkcli ls "$zparent/$zchild" 2>&1 | tail -1 | sed "s/\[//" | sed "s/\]//")
  if [ "$znodes" != "" ]; then
    echo -n "ZNode(s) [${znodes}] of $command are not expired. Exiting without cleaning hbase data."
    echo #force a newline
    exit 1;
  else
    echo -n "All ZNode(s) of $command are expired."
  fi
  echo #force a newline
}
|
|
|
|
|
|
|
|
# Forward a single command line to the hbase zkcli client, folding
# stderr into stdout so callers capture everything the client prints.
# $command is intentionally expanded unquoted below so a multi-word
# string like "deleteall /hbase" splits into separate zkcli arguments.
execute_zk_command() {
  command="$1";
  "${bin}"/hbase zkcli ${command} 2>&1
}
|
|
|
|
|
|
|
|
# Run one hdfs FsShell command (e.g. "-rm -R <path>"), folding stderr
# into stdout.  $command is expanded unquoted on purpose so the option
# string splits into individual FsShell arguments.
execute_hdfs_command() {
  command="$1";
  "${bin}"/hbase org.apache.hadoop.fs.FsShell ${command} 2>&1
}
|
|
|
|
|
2015-03-12 20:06:27 -04:00
|
|
|
# Reset the ZooKeeper ACLs on the hbase znodes via ZkAclReset.  The
# caller currently passes no argument, so $command is usually empty and
# the unquoted expansion contributes no extra argument.
execute_clean_acls() {
  command="$1";
  "${bin}"/hbase org.apache.hadoop.hbase.zookeeper.ZkAclReset ${command} 2>&1
}
|
|
|
|
|
2013-05-03 16:30:44 -04:00
|
|
|
# Dispatch the requested cleanup action.  Unrecognized options are
# silently ignored here; argument validation happened at startup.
# Reads globals: zparent, hrootdir.
clean_up() {
  case "$1" in
    --cleanZk)
      execute_zk_command "deleteall ${zparent}";
      ;;
    --cleanHdfs)
      execute_hdfs_command "-rm -R ${hrootdir}"
      ;;
    --cleanAll)
      # Both of the above: wipe zookeeper state, then the hdfs root dir.
      execute_zk_command "deleteall ${zparent}";
      execute_hdfs_command "-rm -R ${hrootdir}"
      ;;
    --cleanAcls)
      execute_clean_acls;
      ;;
    *)
      ;;
  esac
}
|
|
|
|
|
|
|
|
# Probe ZooKeeper for a znode with "zkcli stat".  Succeeds (grep exit 0)
# exactly when the znode is ABSENT or ZooKeeper is unreachable, i.e.
# when the stat output contains one of the two known failure messages.
#   $1 - znode path to probe
check_znode_exists() {
  command="$1"
  "${bin}"/hbase zkcli stat ${command} 2>&1 | grep "Node does not exist\|Connection refused"
}
|
|
|
|
|
|
|
|
# Refuse to clean while any hbase process still holds its znode.  When
# the parent znode is present (check_znode_exists fails, i.e. neither
# "Node does not exist" nor "Connection refused" was printed), require
# every server znode to have expired before proceeding; check_for_znodes
# exits the script itself if any remain.
if ! check_znode_exists "$zparent"; then
  # make sure the online region server(s) znode(s) have been deleted before continuing
  check_for_znodes regionservers
  # make sure the backup master(s) znode(s) has been deleted before continuing
  check_for_znodes backupmasters
  # make sure the master znode has been deleted before continuing
  zmaster=$("$bin"/hbase org.apache.hadoop.hbase.util.HBaseConfTool zookeeper.znode.master)
  if [ "$zmaster" == "null" ]; then zmaster="master"; fi
  zmaster=$zparent/$zmaster
  if ! check_znode_exists "$zmaster"; then
    echo -n "Master ZNode is not expired. Exiting without cleaning hbase data."
    echo #force a new line
    exit 1
  else
    echo "Active Master ZNode also expired."
  fi
  echo #force a newline
else
  echo "HBase parent znode ${zparent} does not exist."
fi

# cleans zookeeper and/or hdfs data.
clean_up "$format_option"
|