HADOOP-403 Fix build after move of hbase in svn

Removed mention of all of the HADOOP_* environment variables. Made
HBASE_* equivs.  hbase-env.sh is now a near dup of hadoop-env.sh.
Removed options on scripts that used to take both hbase and hadoop
conf and home. Now we just work w/ hbase.  Fixed logging (logs
dir now under hbase).


git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@618765 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2008-02-05 20:09:24 +00:00
parent 6aacbfb18d
commit 24b065cc91
8 changed files with 65 additions and 100 deletions

View File

@ -38,15 +38,10 @@
#
# HBASE_ROOT_LOGGER The root appender. Default is INFO,console
#
# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
#
# HADOOP_HOME Hadoop home directory.
#
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
# This will set HBASE_HOME, HADOOP_HOME, etc.
# This will set HBASE_HOME, etc.
. "$bin"/hbase-config.sh
cygwin=false
@ -56,7 +51,7 @@ esac
# if no args specified, show usage
if [ $# = 0 ]; then
echo "Usage: hbase [--hadoop=hadoopdir] <command>"
echo "Usage: hbase <command>"
echo "where <command> is one of:"
echo " shell run the Hbase shell"
echo " master run an Hbase HMaster node"
@ -101,10 +96,6 @@ if [ "$HBASE_HEAPSIZE" != "" ]; then
fi
# CLASSPATH initially contains $HBASE_CONF_DIR
# Add HADOOP_CONF_DIR if its been defined.
if [ ! "$HADOOP_CONF_DIR" = "" ]; then
CLASSPATH="${CLASSPATH}:${HADOOP_CONF_DIR}"
fi
CLASSPATH="${CLASSPATH}:${HBASE_CONF_DIR}"
CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
@ -143,7 +134,6 @@ done
# default log directory & file
# TODO: Should we log to hadoop or under hbase?
if [ "$HBASE_LOG_DIR" = "" ]; then
HBASE_LOG_DIR="$HBASE_HOME/logs"
fi
@ -154,9 +144,8 @@ fi
# cygwin path translation
if $cygwin; then
CLASSPATH=`cygpath -p -w "$CLASSPATH"`
HADOOP_HOME=`cygpath -d "$HADOOP_HOME"`
HBASE_HOME=`cygpath -d "$HBASE_HOME"`
HADOOP_LOG_DIR=`cygpath -d "$HADOOP_LOG_DIR"`
HBASE_LOG_DIR=`cygpath -d "$HBASE_LOG_DIR"`
fi
# cygwin path translation
@ -189,10 +178,10 @@ fi
# 'cost' to have this flag enabled. Its a 1.6 flag only. See:
# http://blogs.sun.com/alanb/entry/outofmemoryerror_looks_a_bit_better
HBASE_OPTS="$HBASE_OPTS -XX:+HeapDumpOnOutOfMemoryError"
HBASE_OPTS="$HBASE_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
HBASE_OPTS="$HBASE_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE"
HBASE_OPTS="$HBASE_OPTS -Dhadoop.home.dir=$HADOOP_HOME"
HBASE_OPTS="$HBASE_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
HBASE_OPTS="$HBASE_OPTS -Dhbase.log.dir=$HBASE_LOG_DIR"
HBASE_OPTS="$HBASE_OPTS -Dhbase.log.file=$HBASE_LOGFILE"
HBASE_OPTS="$HBASE_OPTS -Dhbase.home.dir=$HBASE_HOME"
HBASE_OPTS="$HBASE_OPTS -Dhbase.id.str=$HBASE_IDENT_STRING"
HBASE_OPTS="$HBASE_OPTS -Dhbase.root.logger=${HBASE_ROOT_LOGGER:-INFO,console}"
HBASE_OPTS="$HBASE_OPTS -Dhbase.home.dir=$HBASE_HOME"
if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then

View File

@ -46,27 +46,15 @@ this="$bin/$script"
# the root of the hbase installation
export HBASE_HOME=`dirname "$this"`/..
#check to see if the conf dir or hadoop home are given as an optional arguments
#check to see if the conf dir or hbase home are given as an optional arguments
while [ $# -gt 1 ]
do
if [ "--config" = "$1" ]
then
shift
confdir=$1
shift
HADOOP_CONF_DIR=$confdir
elif [ "--hbaseconfig" = "$1" ]
then
shift
confdir=$1
shift
HBASE_CONF_DIR=$confdir
elif [ "--hadoop" = "$1" ]
then
shift
home=$1
shift
HADOOP_HOME=$home
elif [ "--hosts" = "$1" ]
then
shift
@ -79,21 +67,6 @@ do
fi
done
# If no hadoop home specified, then we assume its above this directory.
# Can be in one of two places. If we've been packaged, then it'll be
# two levels above us. If we are running from src at src/contrib/hbase
# or from the build directory at build/contrib/hbase, then its three
# levels up. Look for the hadoop script.
if [ "$HADOOP_HOME" = "" ]; then
if [ -f "$HBASE_HOME/../../bin/hadoop" ]; then
HADOOP_HOME="$HBASE_HOME/../../"
else
HADOOP_HOME="$HBASE_HOME/../../../"
fi
fi
# Allow alternate hadoop conf dir location.
HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_HOME/conf}"
# Allow alternate hbase conf dir location.
HBASE_CONF_DIR="${HBASE_CONF_DIR:-$HBASE_HOME/conf}"
# List of hbase regions servers.

View File

@ -24,17 +24,16 @@
#
# Environment Variables
#
# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
# HBASE_CONF_DIR Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
# HADOOP_LOG_DIR Where log files are stored. PWD by default.
# HADOOP_PID_DIR The pid files are stored. /tmp by default.
# HADOOP_IDENT_STRING A string representing this instance of hadoop. $USER by default
# HADOOP_NICENESS The scheduling priority for daemons. Defaults to 0.
# HBASE_CONF_DIR Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
# HBASE_LOG_DIR Where log files are stored. PWD by default.
# HBASE_PID_DIR The pid files are stored. /tmp by default.
# HBASE_IDENT_STRING A string representing this instance of hadoop. $USER by default
# HBASE_NICENESS The scheduling priority for daemons. Defaults to 0.
#
# Modelled after $HADOOP_HOME/bin/hadoop-daemon.sh
usage="Usage: hbase-daemon.sh [--config <hadoop-conf-dir>]\
[--hbaseconfig <hbase-conf-dir>] (start|stop) <hbase-command> \
usage="Usage: hbase-daemon.sh [--config <conf-dir>]\
(start|stop) <hbase-command> \
<args...>"
# if no args specified, show usage
@ -72,36 +71,33 @@ hbase_rotate_log ()
fi
}
if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
. "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi
if [ -f "${HBASE_CONF_DIR}/hbase-env.sh" ]; then
. "${HBASE_CONF_DIR}/hbase-env.sh"
fi
# get log directory
if [ "$HADOOP_LOG_DIR" = "" ]; then
export HADOOP_LOG_DIR="$HADOOP_HOME/logs"
if [ "$HBASE_LOG_DIR" = "" ]; then
export HBASE_LOG_DIR="$HBASE_HOME/logs"
fi
mkdir -p "$HADOOP_LOG_DIR"
mkdir -p "$HBASE_LOG_DIR"
if [ "$HADOOP_PID_DIR" = "" ]; then
HADOOP_PID_DIR=/tmp
if [ "$HBASE_PID_DIR" = "" ]; then
HBASE_PID_DIR=/tmp
fi
if [ "$HADOOP_IDENT_STRING" = "" ]; then
export HADOOP_IDENT_STRING="$USER"
if [ "$HBASE_IDENT_STRING" = "" ]; then
export HBASE_IDENT_STRING="$USER"
fi
# some variables
export HADOOP_LOGFILE=hbase-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
export HADOOP_ROOT_LOGGER="INFO,DRFA"
log=$HADOOP_LOG_DIR/hbase-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out
pid=$HADOOP_PID_DIR/hbase-$HADOOP_IDENT_STRING-$command.pid
export HBASE_LOGFILE=hbase-$HBASE_IDENT_STRING-$command-$HOSTNAME.log
export HBASE_ROOT_LOGGER="INFO,DRFA"
log=$HBASE_LOG_DIR/hbase-$HBASE_IDENT_STRING-$command-$HOSTNAME.out
pid=$HBASE_PID_DIR/hbase-$HBASE_IDENT_STRING-$command.pid
# Set default scheduling priority
if [ "$HADOOP_NICENESS" = "" ]; then
export HADOOP_NICENESS=0
if [ "$HBASE_NICENESS" = "" ]; then
export HBASE_NICENESS=0
fi
case $startStop in
@ -116,9 +112,8 @@ case $startStop in
hbase_rotate_log $log
echo starting $command, logging to $log
nohup nice -n $HADOOP_NICENESS "$HBASE_HOME"/bin/hbase \
--hadoop "${HADOOP_HOME}" \
--config "${HADOOP_CONF_DIR}" --hbaseconfig "${HBASE_CONF_DIR}" \
nohup nice -n $HBASE_NICENESS "$HBASE_HOME"/bin/hbase \
--config "${HBASE_CONF_DIR}" \
$command $startStop "$@" > "$log" 2>&1 < /dev/null &
echo $! > $pid
sleep 1; head "$log"
@ -129,9 +124,8 @@ case $startStop in
if kill -0 `cat $pid` > /dev/null 2>&1; then
echo -n stopping $command
if [ "$command" = "master" ]; then
nohup nice -n $HADOOP_NICENESS "$HBASE_HOME"/bin/hbase \
--hadoop "${HADOOP_HOME}" \
--config "${HADOOP_CONF_DIR}" --hbaseconfig "${HBASE_CONF_DIR}" \
nohup nice -n $HBASE_NICENESS "$HBASE_HOME"/bin/hbase \
--config "${HBASE_CONF_DIR}" \
$command $startStop "$@" > "$log" 2>&1 < /dev/null &
else
kill `cat $pid` > /dev/null 2>&1

View File

@ -20,13 +20,11 @@
# * limitations under the License.
# */
#
# Run a Hadoop hbase command on all slave hosts.
# Run a hbase command on all slave hosts.
# Modelled after $HADOOP_HOME/bin/hadoop-daemons.sh
usage="Usage: hbase-daemons.sh [--hadoop <hadoop-home>]
[--config <hadoop-confdir>] [--hbase <hbase-home>]\
[--hbaseconfig <hbase-confdir>] [--hosts regionserversfile]\
[start|stop] command args..."
usage="Usage: hbase-daemons.sh [--config <hbase-confdir>] \
[--hosts regionserversfile] [start|stop] command args..."
# if no args specified, show usage
if [ $# -le 1 ]; then
@ -39,8 +37,6 @@ bin=`cd "$bin"; pwd`
. $bin/hbase-config.sh
exec "$bin/regionservers.sh" --config "${HADOOP_CONF_DIR}" \
--hbaseconfig "${HBASE_CONF_DIR}" --hadoop "${HADOOP_HOME}" \
exec "$bin/regionservers.sh" --config "${HBASE_CONF_DIR}" \
cd "${HBASE_HOME}" \; \
"$bin/hbase-daemon.sh" --config "${HADOOP_CONF_DIR}" \
--hbaseconfig "${HBASE_CONF_DIR}" --hadoop "${HADOOP_HOME}" "$@"
"$bin/hbase-daemon.sh" --config "${HBASE_CONF_DIR}" "$@"

View File

@ -33,8 +33,7 @@
#
# Modelled after $HADOOP_HOME/bin/slaves.sh.
usage="Usage: regionservers [--config <hadoop-confdir>]\
[--hbaseconfig <hbase-confdir>] command..."
usage="Usage: regionservers [--config <hbase-confdir>] command..."
# if no args specified, show usage
if [ $# -le 0 ]; then
@ -52,9 +51,6 @@ bin=`cd "$bin"; pwd`
# hbase-env.sh. Save it here.
HOSTLIST=$HBASE_REGIONSERVERS
if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
. "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi
if [ -f "${HBASE_CONF_DIR}/hbase-env.sh" ]; then
. "${HBASE_CONF_DIR}/hbase-env.sh"
fi
@ -68,10 +64,10 @@ if [ "$HOSTLIST" = "" ]; then
fi
for regionserver in `cat "$HOSTLIST"`; do
ssh $HADOOP_SSH_OPTS $regionserver $"${@// /\\ }" \
ssh $HBASE_SSH_OPTS $regionserver $"${@// /\\ }" \
2>&1 | sed "s/^/$regionserver: /" &
if [ "$HADOOP_SLAVE_SLEEP" != "" ]; then
sleep $HADOOP_SLAVE_SLEEP
if [ "$HBASE_SLAVE_SLEEP" != "" ]; then
sleep $HBASE_SLAVE_SLEEP
fi
done

View File

@ -38,8 +38,6 @@ if [ $errCode -ne 0 ]
then
exit $errCode
fi
"$bin"/hbase-daemon.sh --config "${HADOOP_CONF_DIR}" \
--hbaseconfig "${HBASE_CONF_DIR}" start master
"$bin"/hbase-daemons.sh --config "${HADOOP_CONF_DIR}" \
--hbaseconfig "${HBASE_CONF_DIR}" --hadoop "${HADOOP_HOME}" \
--hosts "${HBASE_REGIONSERVERS}" start regionserver
"$bin"/hbase-daemon.sh --config "${HBASE_CONF_DIR}" start master
"$bin"/hbase-daemons.sh --config "${HBASE_CONF_DIR}" \
--hosts "${HBASE_REGIONSERVERS}" start regionserver

View File

@ -29,5 +29,4 @@ bin=`cd "$bin"; pwd`
. "$bin"/hbase-config.sh
"$bin"/hbase-daemon.sh --config "${HADOOP_CONF_DIR}" \
--hbaseconfig "${HBASE_CONF_DIR}" stop master
"$bin"/hbase-daemon.sh --config "${HBASE_CONF_DIR}" stop master

View File

@ -35,3 +35,23 @@
# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
# export HBASE_REGIONSERVERS=${HBASE_HOME}/conf/regionservers
# Extra ssh options. Empty by default.
# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
# Where log files are stored. $HBASE_HOME/logs by default.
# export HBASE_LOG_DIR=${HBASE_HOME}/logs
# A string representing this instance of hbase. $USER by default.
# export HBASE_IDENT_STRING=$USER
# The scheduling priority for daemon processes. See 'man nice'.
# export HBASE_NICENESS=10
# The directory where pid files are stored. /tmp by default.
# export HBASE_PID_DIR=/var/hadoop/pids
# Seconds to sleep between slave commands. Unset by default. This
# can be useful in large clusters, where, e.g., slave rsyncs can
# otherwise arrive faster than the master can service them.
# export HBASE_SLAVE_SLEEP=0.1