HADOOP-1465 Add cluster stop/start scripts for hbase

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@547427 13f79535-47bb-0310-9956-ffa450edef68
Jim Kellerman 2007-06-14 22:08:56 +00:00
parent 13c1e48253
commit e78644ed10
16 changed files with 377 additions and 135 deletions


@@ -32,3 +32,5 @@ Trunk (unreleased changes)
17. HADOOP-1476 Distributed version of 'Performance Evaluation' script
18. HADOOP-1469 Asynchronous table creation
19. HADOOP-1415 Integrate BSD licensed bloom filter implementation.
20. HADOOP-1465 Add cluster stop/start scripts for hbase

3
bin/hbase-config.sh Executable file → Normal file

@@ -1,6 +1,7 @@
# included in all the hbase scripts with source command
# should not be executable directly
# also should not be passed any arguments, since we need original $*
# Modelled after $HADOOP_HOME/bin/hadoop-env.sh.
# resolve links - $0 may be a softlink
@@ -42,7 +43,7 @@ do
shift
;;
--hosts=*)
regionservers=`echo $1|sed 's/[^=]*=\(.*\)/\1/'`
HBASE_REGIONSERVERS=`echo $1|sed 's/[^=]*=\(.*\)/\1/'`
shift
;;
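The --hosts flag now loads its value into HBASE_REGIONSERVERS, which regionservers.sh (added below) reads when building its host list. A minimal sketch of the override, where the scratch file path and hostnames are assumptions for illustration:

# Run against an alternate host list instead of
# ${HBASE_CONF_DIR}/regionservers (file and hosts are hypothetical).
printf 'rs1.example.com\nrs2.example.com\n' > /tmp/regionservers.test
bin/regionservers.sh --hosts=/tmp/regionservers.test uptime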

120
bin/hbase-daemon.sh Executable file

@@ -0,0 +1,120 @@
#!/bin/sh
#
# Runs a Hadoop hbase command as a daemon.
#
# Environment Variables
#
# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
# HBASE_CONF_DIR Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
# HADOOP_LOG_DIR Where log files are stored. ${HADOOP_HOME}/logs by default.
# HADOOP_PID_DIR Where pid files are stored. /tmp by default.
# HADOOP_IDENT_STRING A string representing this instance of hadoop. $USER by default
# HADOOP_NICENESS The scheduling priority for daemons. Defaults to 0.
#
# Modelled after $HADOOP_HOME/bin/hadoop-daemon.sh
usage="Usage: hbase-daemon.sh [--config=<hadoop-conf-dir>] [--hbaseconfig=<hbase-conf-dir>] <hbase-command> (start|stop) <args...>"
# if no args specified, show usage
if [ $# -le 1 ]; then
echo $usage
exit 1
fi
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hbase-config.sh
# get arguments
command=$1
shift
startStop=$1
shift
hbase_rotate_log ()
{
log=$1;
num=5;
if [ -n "$2" ]; then
num=$2
fi
if [ -f "$log" ]; then # rotate logs
while [ $num -gt 1 ]; do
prev=`expr $num - 1`
[ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
num=$prev
done
mv "$log" "$log.$num";
fi
}
if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
. "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi
if [ -f "${HBASE_CONF_DIR}/hbase-env.sh" ]; then
. "${HBASE_CONF_DIR}/hbase-env.sh"
fi
# get log directory
if [ "$HADOOP_LOG_DIR" = "" ]; then
export HADOOP_LOG_DIR="$HADOOP_HOME/logs"
fi
mkdir -p "$HADOOP_LOG_DIR"
if [ "$HADOOP_PID_DIR" = "" ]; then
HADOOP_PID_DIR=/tmp
fi
if [ "$HADOOP_IDENT_STRING" = "" ]; then
export HADOOP_IDENT_STRING="$USER"
fi
# some variables
export HADOOP_LOGFILE=hbase-$HADOOP_IDENT_STRING-$command-`hostname`.log
export HADOOP_ROOT_LOGGER="INFO,DRFA"
log=$HADOOP_LOG_DIR/hbase-$HADOOP_IDENT_STRING-$command-`hostname`.out
pid=$HADOOP_PID_DIR/hbase-$HADOOP_IDENT_STRING-$command.pid
# Set default scheduling priority
if [ "$HADOOP_NICENESS" = "" ]; then
export HADOOP_NICENESS=0
fi
case $startStop in
(start)
if [ -f $pid ]; then
if kill -0 `cat $pid` > /dev/null 2>&1; then
echo $command running as process `cat $pid`. Stop it first.
exit 1
fi
fi
hbase_rotate_log $log
echo starting $command, logging to $log
nohup nice -n $HADOOP_NICENESS "$HBASE_HOME"/bin/hbase --config="${HADOOP_CONF_DIR}" --hbaseconfig="${HBASE_CONF_DIR}" $command $startStop "$@" > "$log" 2>&1 < /dev/null &
echo $! > $pid
sleep 1; head "$log"
;;
(stop)
if [ -f $pid ]; then
if kill -0 `cat $pid` > /dev/null 2>&1; then
echo stopping $command
nohup nice -n $HADOOP_NICENESS "$HBASE_HOME"/bin/hbase --config="${HADOOP_CONF_DIR}" --hbaseconfig="${HBASE_CONF_DIR}" $command $startStop "$@" > "$log" 2>&1 < /dev/null &
else
echo no $command to stop
fi
else
echo no $command to stop
fi
;;
(*)
echo $usage
exit 1
;;
esac
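A usage sketch for the wrapper above, assuming default conf directories; the file names follow the patterns computed in the script:

# Start the master as a daemon. Output lands in
# $HADOOP_LOG_DIR/hbase-$USER-master-<hostname>.out, with older copies
# rotated to .out.1 through .out.5 by hbase_rotate_log.
bin/hbase-daemon.sh master start

# Stop it. The pid is read from $HADOOP_PID_DIR/hbase-$USER-master.pid;
# the stop command is only issued if that process is still alive.
bin/hbase-daemon.sh master stop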

19
bin/hbase-daemons.sh Executable file

@@ -0,0 +1,19 @@
#!/bin/sh
#
# Run a Hadoop hbase command on all slave hosts.
# Modelled after $HADOOP_HOME/bin/hadoop-daemons.sh
usage="Usage: hbase-daemons.sh [--config=<confdir>] [--hbaseconfig=<hbase-confdir>] [--hosts=regionserversfile] command [start|stop] args..."
# if no args specified, show usage
if [ $# -le 1 ]; then
echo $usage
exit 1
fi
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. $bin/hbase-config.sh
exec "$bin/regionservers.sh" --config="${HADOOP_CONF_DIR}" --hbaseconfig="${HBASE_CONF_DIR}" cd "$HBASE_HOME" \; "$bin/hbase-daemon.sh" --config="${HADOOP_CONF_DIR}" --hbaseconfig="${HBASE_CONF_DIR}" "$@"

57
bin/regionservers.sh Executable file

@@ -0,0 +1,57 @@
#!/bin/bash
#
# Run a shell command on all regionserver hosts.
#
# Environment Variables
#
# HBASE_REGIONSERVERS File naming remote hosts.
Default is ${HBASE_CONF_DIR}/regionservers
# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
# HBASE_CONF_DIR Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
# HADOOP_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
# HADOOP_SSH_OPTS Options passed to ssh when running remote commands.
#
# Modelled after $HADOOP_HOME/bin/slaves.sh.
usage="Usage: regionservers [--config=<confdir>] [--hbaseconfig=<hbase-confdir>] command..."
# if no args specified, show usage
if [ $# -le 0 ]; then
echo $usage
exit 1
fi
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hbase-config.sh
# If the regionservers file is specified in the command line,
# then it takes precedence over the definition in
# hbase-env.sh. Save it here.
HOSTLIST=$HBASE_REGIONSERVERS
if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
. "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi
if [ -f "${HBASE_CONF_DIR}/hbase-env.sh" ]; then
. "${HBASE_CONF_DIR}/hbase-env.sh"
fi
if [ "$HOSTLIST" = "" ]; then
if [ "$HBASE_REGIONSERVERS" = "" ]; then
export HOSTLIST="${HBASE_CONF_DIR}/regionservers"
else
export HOSTLIST="${HBASE_REGIONSERVERS}"
fi
fi
for regionserver in `cat "$HOSTLIST"`; do
ssh $HADOOP_SSH_OPTS $regionserver $"${@// /\\ }" \
2>&1 | sed "s/^/$regionserver: /" &
if [ "$HADOOP_SLAVE_SLEEP" != "" ]; then
sleep $HADOOP_SLAVE_SLEEP
fi
done
wait
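Because the script forwards an arbitrary command, it is useful beyond daemon control; a hedged example (command choices are illustrative):

# Run 'uptime' on every listed regionserver; each output line comes
# back prefixed with the originating host.
bin/regionservers.sh uptime

# Stagger the ssh connections by two seconds per host.
HADOOP_SLAVE_SLEEP=2 bin/regionservers.sh df -h /tmp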

15
bin/start-hbase.sh Executable file

@@ -0,0 +1,15 @@
#!/bin/sh
# Modelled after $HADOOP_HOME/bin/start-dfs.sh.
# Start hadoop hbase daemons.
# Run this on master node.
usage="Usage: start-hbase.sh"
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hbase-config.sh
# start hbase daemons
"$bin"/hbase-daemon.sh --config="${HADOOP_CONF_DIR}" --hbaseconfig="${HBASE_CONF_DIR}" master start
"$bin"/hbase-daemons.sh --config="${HADOOP_CONF_DIR}" --hbaseconfig="${HBASE_CONF_DIR}" regionserver start

11
bin/stop-hbase.sh Executable file

@@ -0,0 +1,11 @@
#!/bin/sh
# Modelled after $HADOOP_HOME/bin/stop-dfs.sh.
# Stop hadoop hbase daemons. Run this on master node.
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
. "$bin"/hbase-config.sh
"$bin"/hbase-daemon.sh --config="${HADOOP_CONF_DIR}" --hbaseconfig="${HBASE_CONF_DIR}" master stop


@@ -3,14 +3,14 @@
<configuration>
<property>
<name>hbase.master</name>
<value>localhost:60000</value>
<value>0.0.0.0:60000</value>
<description>The host and port that the HBase master runs at.
TODO: Support 'local' (All running in single context).
</description>
</property>
<property>
<name>hbase.regionserver</name>
<value>localhost:60010</value>
<value>0.0.0.0:60010</value>
<description>The host and port a HBase region server runs at.
</description>
</property>
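With the shipped defaults widened to 0.0.0.0, a site that wants a fixed address can still pin one in hbase-site.xml; a sketch, where the hostname is an assumption:

# Hypothetical site override restoring a fixed master address.
cat > "${HBASE_HOME}/conf/hbase-site.xml" <<'EOF'
<?xml version="1.0"?>
<configuration>
  <property>
    <name>hbase.master</name>
    <value>master.example.com:60000</value>
  </property>
</configuration>
EOF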

21
conf/hbase-env.sh Normal file

@@ -0,0 +1,21 @@
# Set HBase-specific environment variables here.
# The only required environment variable is JAVA_HOME. All others are
# optional. When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.
# The java implementation to use. Required.
# export JAVA_HOME=/usr/lib/j2sdk1.5-sun
# Extra Java CLASSPATH elements. Optional.
# export HBASE_CLASSPATH=
# The maximum amount of heap to use, in MB. Default is 1000.
# export HBASE_HEAPSIZE=1000
# Extra Java runtime options. Empty by default.
# export HBASE_OPTS=-server
# File naming remote regionserver hosts. ${HBASE_CONF_DIR}/regionservers by default.
# export HBASE_REGIONSERVERS=${HBASE_HOME}/conf/regionservers
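A minimal populated hbase-env.sh might look as follows; the JDK path is a site-specific assumption:

# Required: the Java implementation to use (path is an assumption).
export JAVA_HOME=/usr/lib/j2sdk1.5-sun
# Optional: heap size in MB and the regionserver host list.
export HBASE_HEAPSIZE=1000
export HBASE_REGIONSERVERS=${HBASE_HOME}/conf/regionservers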


@@ -38,14 +38,16 @@ public interface HConstants {
/** Parameter name for master address */
static final String MASTER_ADDRESS = "hbase.master";
static final String DEFAULT_HOST = "0.0.0.0";
/** Default master address */
static final String DEFAULT_MASTER_ADDRESS = "localhost:60000";
static final String DEFAULT_MASTER_ADDRESS = DEFAULT_HOST + ":60000";
/** Parameter name for hbase.regionserver address. */
static final String REGIONSERVER_ADDRESS = "hbase.regionserver";
/** Default region server address */
static final String DEFAULT_REGIONSERVER_ADDRESS = "localhost:60010";
static final String DEFAULT_REGIONSERVER_ADDRESS = DEFAULT_HOST + ":60010";
/** Parameter name for how often threads should wake up */
static final String THREAD_WAKE_FREQUENCY = "hbase.server.thread.wakefrequency";


@@ -818,16 +818,12 @@ public class HMaster implements HConstants, HMasterInterface,
public void regionServerStartup(HServerInfo serverInfo) throws IOException {
String s = serverInfo.getServerAddress().toString().trim();
HServerInfo storedInfo = null;
if(LOG.isDebugEnabled()) {
LOG.debug("received start message from: " + s);
}
LOG.info("received start message from: " + s);
// If we get the startup message but there's an old server by that
// name, then we can timeout the old one right away and register
// the new one.
storedInfo = serversToServerInfo.remove(s);
if(storedInfo != null && !closed) {
synchronized(msgQueue) {
msgQueue.addLast(new PendingServerShutdown(storedInfo));
@@ -836,9 +832,7 @@
}
// Either way, record the new server
serversToServerInfo.put(s, serverInfo);
if(!closed) {
Text serverLabel = new Text(s);
LOG.debug("Created lease for " + serverLabel);
@@ -1101,11 +1095,8 @@ public class HMaster implements HConstants, HMasterInterface,
}
// Figure out what the RegionServer ought to do, and write back.
if(unassignedRegions.size() > 0) {
// Open new regions as necessary
int targetForServer = (int) Math.ceil(unassignedRegions.size()
/ (1.0 * serversToServerInfo.size()));


@@ -241,8 +241,8 @@ class HRegion implements HConstants {
TreeMap<Text, Long> rowsToLocks = new TreeMap<Text, Long>();
TreeMap<Long, Text> locksToRows = new TreeMap<Long, Text>();
TreeMap<Text, HStore> stores = new TreeMap<Text, HStore>();
TreeMap<Long, TreeMap<Text, BytesWritable>> targetColumns
= new TreeMap<Long, TreeMap<Text, BytesWritable>>();
Map<Long, TreeMap<Text, BytesWritable>> targetColumns
= new HashMap<Long, TreeMap<Text, BytesWritable>>();
HMemcache memcache;
@@ -1068,7 +1068,7 @@ class HRegion implements HConstants {
checkColumn(targetCol);
Text row = getRowFromLock(lockid);
if(row == null) {
if (row == null) {
throw new LockException("No write lock for lockid " + lockid);
}
@@ -1078,15 +1078,15 @@ class HRegion implements HConstants {
synchronized(row) {
// This check makes sure that another thread from the client
// hasn't aborted/committed the write-operation.
if(row != getRowFromLock(lockid)) {
if (row != getRowFromLock(lockid)) {
throw new LockException("Locking error: put operation on lock " +
lockid + " unexpected aborted by another thread");
}
TreeMap<Text, BytesWritable> targets = targetColumns.get(lockid);
if(targets == null) {
TreeMap<Text, BytesWritable> targets = this.targetColumns.get(lockid);
if (targets == null) {
targets = new TreeMap<Text, BytesWritable>();
targetColumns.put(lockid, targets);
this.targetColumns.put(lockid, targets);
}
targets.put(targetCol, val);
}
@@ -1117,7 +1117,7 @@ class HRegion implements HConstants {
+ lockid + " unexpected aborted by another thread");
}
targetColumns.remove(lockid);
this.targetColumns.remove(lockid);
releaseRowLock(row);
}
}
@@ -1144,12 +1144,15 @@ class HRegion implements HConstants {
synchronized(row) {
// Add updates to the log and add values to the memcache.
long commitTimestamp = System.currentTimeMillis();
log.append(regionInfo.regionName, regionInfo.tableDesc.getName(), row,
targetColumns.get(Long.valueOf(lockid)), commitTimestamp);
memcache.add(row, targetColumns.get(Long.valueOf(lockid)),
commitTimestamp);
TreeMap<Text, BytesWritable> columns =
this.targetColumns.get(lockid);
if (columns != null && columns.size() > 0) {
log.append(regionInfo.regionName, regionInfo.tableDesc.getName(),
row, columns, commitTimestamp);
memcache.add(row, columns, commitTimestamp);
// OK, all done!
targetColumns.remove(Long.valueOf(lockid));
}
targetColumns.remove(lockid);
releaseRowLock(row);
}
recentCommits++;


@@ -16,6 +16,7 @@
package org.apache.hadoop.hbase;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@@ -37,6 +38,7 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.util.StringUtils;
/*******************************************************************************
@@ -59,29 +61,29 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
static final Log LOG = LogFactory.getLog(HRegionServer.class);
volatile boolean stopRequested;
volatile boolean abortRequested;
private Path rootDir;
HServerInfo info;
Configuration conf;
private Random rand;
protected volatile boolean stopRequested;
protected volatile boolean abortRequested;
private final Path rootDir;
protected final HServerInfo serverInfo;
protected final Configuration conf;
private final Random rand;
// region name -> HRegion
SortedMap<Text, HRegion> onlineRegions;
Map<Text, HRegion> retiringRegions = new HashMap<Text, HRegion>();
protected final SortedMap<Text, HRegion> onlineRegions;
protected final Map<Text, HRegion> retiringRegions = new HashMap<Text, HRegion>();
final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
private Vector<HMsg> outboundMsgs;
protected final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
private final Vector<HMsg> outboundMsgs;
int numRetries;
long threadWakeFrequency;
private long msgInterval;
protected final long threadWakeFrequency;
private final long msgInterval;
// Check to see if regions should be split
long splitOrCompactCheckFrequency;
private SplitOrCompactChecker splitOrCompactChecker;
private Thread splitOrCompactCheckerThread;
Integer splitOrCompactLock = Integer.valueOf(0);
protected final long splitOrCompactCheckFrequency;
private final SplitOrCompactChecker splitOrCompactChecker;
private final Thread splitOrCompactCheckerThread;
protected final Integer splitOrCompactLock = new Integer(0);
/**
* Interface used by the {@link org.apache.hadoop.io.retry} mechanism.
@@ -211,7 +213,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
region.getRegionName());
for (int i = 0; i < newRegions.length; i++) {
HRegion.addRegionToMETA(client, tableToUpdate, newRegions[i],
info.getServerAddress(), info.getStartCode());
serverInfo.getServerAddress(), serverInfo.getStartCode());
}
// Now tell the master about the new regions
@@ -247,9 +249,9 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
}
// Cache flushing
private Flusher cacheFlusher;
private Thread cacheFlusherThread;
Integer cacheFlusherLock = Integer.valueOf(0);
private final Flusher cacheFlusher;
private final Thread cacheFlusherThread;
protected final Integer cacheFlusherLock = new Integer(0);
/** Runs periodically to flush the memcache */
class Flusher implements Runnable {
/* (non-Javadoc)
@@ -308,10 +310,10 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
// Logging
HLog log;
private LogRoller logRoller;
private Thread logRollerThread;
Integer logRollerLock = Integer.valueOf(0);
protected final HLog log;
private final LogRoller logRoller;
private final Thread logRollerThread;
protected final Integer logRollerLock = new Integer(0);
/** Runs periodically to determine if the log should be rolled */
class LogRoller implements Runnable {
@@ -369,7 +371,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
*/
public HRegionServer(Configuration conf) throws IOException {
this(new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR)),
new HServerAddress(conf.get(REGIONSERVER_ADDRESS, "localhost:0")),
new HServerAddress(conf.get(REGIONSERVER_ADDRESS,
DEFAULT_REGIONSERVER_ADDRESS)),
conf);
}
@@ -420,28 +423,33 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
try {
// Server to handle client requests
this.server = RPC.getServer(this, address.getBindAddress(),
address.getPort(), conf.getInt("hbase.regionserver.handler.count", 10),
false, conf);
this.info = new HServerInfo(new HServerAddress(server.getListenerAddress()),
// Use configured nameserver & interface to get local hostname.
// 'serverInfo' is sent to master. Should have name of this host rather than
// 'localhost' or 0.0.0.0 or 127.0.0.1 in it.
String localHostname = DNS.getDefaultHost(
conf.get("dfs.datanode.dns.interface","default"),
conf.get("dfs.datanode.dns.nameserver","default"));
InetSocketAddress hostnameAddress = new InetSocketAddress(localHostname,
server.getListenerAddress().getPort());
this.serverInfo = new HServerInfo(new HServerAddress(hostnameAddress),
this.rand.nextLong());
// Local file paths
String serverName =
this.info.getServerAddress().getBindAddress() + "_"
+ this.info.getServerAddress().getPort();
String serverName = localHostname + "_" +
this.serverInfo.getServerAddress().getPort();
Path logdir = new Path(rootDir, "log" + "_" + serverName);
// Logging
this.fs = FileSystem.get(conf);
if(fs.exists(logdir)) {
throw new RegionServerRunningException("region server already running at "
+ this.info.getServerAddress().toString());
throw new RegionServerRunningException("region server already running at " +
this.serverInfo.getServerAddress().toString() + " because logdir " +
" exists");
}
this.log = new HLog(fs, logdir, conf);
@@ -449,12 +457,10 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
this.logRollerThread = new Thread(logRoller);
// Remote HMaster
this.hbaseMaster = (HMasterRegionInterface)RPC.waitForProxy(
HMasterRegionInterface.class, HMasterRegionInterface.versionID,
new HServerAddress(conf.get(MASTER_ADDRESS)).getInetSocketAddress(),
conf);
} catch(IOException e) {
this.stopRequested = true;
throw e;
@@ -512,7 +518,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
// continue
}
LOG.info("HRegionServer stopped at: " +
info.getServerAddress().toString());
serverInfo.getServerAddress().toString());
}
/**
@@ -541,8 +547,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
try {
this.server.start();
LOG.info("HRegionServer started at: " + info.getServerAddress().toString());
LOG.info("HRegionServer started at: " + serverInfo.getServerAddress().toString());
} catch(IOException e) {
LOG.error(e);
stopRequested = true;
@@ -558,7 +563,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
LOG.debug("Telling master we are up");
}
hbaseMaster.regionServerStartup(info);
hbaseMaster.regionServerStartup(serverInfo);
if (LOG.isDebugEnabled()) {
LOG.debug("Done telling master we are up");
@@ -590,7 +595,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
}
try {
HMsg msgs[] = hbaseMaster.regionServerReport(info, outboundArray);
HMsg msgs[] = hbaseMaster.regionServerReport(serverInfo, outboundArray);
lastMsg = System.currentTimeMillis();
// Queue up the HMaster's instruction stream for processing
@@ -679,7 +684,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
} catch(IOException e) {
LOG.warn(e);
}
LOG.info("aborting server at: " + info.getServerAddress().toString());
LOG.info("aborting server at: " + serverInfo.getServerAddress().toString());
} else {
Vector<HRegion> closedRegions = closeAllRegions();
@@ -701,14 +706,14 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
}
LOG.info("telling master that region server is shutting down at: "
+info.getServerAddress().toString());
+ serverInfo.getServerAddress().toString());
hbaseMaster.regionServerReport(info, exitMsg);
hbaseMaster.regionServerReport(serverInfo, exitMsg);
} catch(IOException e) {
LOG.warn(e);
}
LOG.info("stopping server at: " + info.getServerAddress().toString());
LOG.info("stopping server at: " + serverInfo.getServerAddress().toString());
}
join();
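Since the regionserver now advertises the name returned by DNS.getDefaultHost, a host that would otherwise resolve to localhost can steer the lookup through the two properties read above; a sketch with assumed values:

# Hypothetical hbase-site.xml pinning the interface and nameserver
# used when resolving the advertised hostname (both values assumed).
cat > "${HBASE_CONF_DIR}/hbase-site.xml" <<'EOF'
<?xml version="1.0"?>
<configuration>
  <property>
    <name>dfs.datanode.dns.interface</name>
    <value>eth1</value>
  </property>
  <property>
    <name>dfs.datanode.dns.nameserver</name>
    <value>ns1.example.com</value>
  </property>
</configuration>
EOF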


@@ -17,28 +17,32 @@ Set <code>JAVA_HOME</code> to the root of your Java installation</li>
<h2>Getting Started</h2>
<p>First, you need a working instance of Hadoop. Download a recent release from
<a href="http://www.apache.org/dyn/closer.cgi/lucene/hadoop/">Hadoop downloads</a>.
Unpack the release and connect to its top-level directory. Edit the file
<code>conf/hadoop-env.sh</code> to define at least <code>JAVA_HOME</code>. Also,
add site-particular customizations to the file <code>conf/hadoop-site.xml</code>.
Try the following command:
<pre>bin/hadoop
</pre>
This will display the documentation for the Hadoop command script.
</p>
<p>Next, start hbase servers. Currently each server -- the master server and the
'slave' regionservers -- must be started manually (FIX).
<pre>src/contrib/hbase/bin/hbase master start
src/contrib/hbase/bin/hbase regionserver start
Unpack the release and connect to its top-level directory. Let this be
<code>${HADOOP_HOME}</code>. Edit the file <code>${HADOOP_HOME}/conf/hadoop-env.sh</code>
to define at least <code>JAVA_HOME</code>. Also, add site-particular
customizations to the file <code>${HADOOP_HOME}/conf/hadoop-site.xml</code>.
Try the following command: <pre>bin/hadoop
</pre>
</p>
<p>As for hadoop, local customizations can be added to
<code>src/contrib/hbase/conf/hbase-site.xml</code>.
<p>Next, change to the hbase root. Let this be <code>${HBASE_HOME}</code>. It is
usually located at <code>${HADOOP_HOME}/src/contrib/hbase</code>. Configure hbase.
Edit <code>${HBASE_HOME}/conf/hbase-env.sh</code> and
<code>${HBASE_HOME}/conf/hbase-site.xml</code> to make site-particular settings.
List the hosts running regionservers in <code>${HBASE_HOME}/conf/regionservers</code>.
</p>
<p>
Here is how to start and then stop hbase:
<pre>${HBASE_HOME}/bin/start-hbase.sh
${HBASE_HOME}/bin/stop-hbase.sh
</pre>
Logs can be found in <code>${HADOOP_LOG_DIR}</code>.
</p>
<h2>Related Documentation</h2>
<ul>
<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase/HbaseArchitecture">Hbase/HbaseArchitecture</a>
<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase">HBase Home Page</a>
<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase/HbaseArchitecture">Hbase Architecture</a>
</ul>
</body>


@@ -45,8 +45,10 @@ public class MiniHBaseCluster implements HConstants {
*
* @param conf
* @param nRegionNodes
* @throws IOException
*/
public MiniHBaseCluster(Configuration conf, int nRegionNodes) {
public MiniHBaseCluster(Configuration conf, int nRegionNodes)
throws IOException {
this(conf, nRegionNodes, true);
}
@@ -56,9 +58,11 @@
* @param conf
* @param nRegionNodes
* @param dfsCluster
* @throws IOException
*/
public MiniHBaseCluster(Configuration conf, int nRegionNodes,
MiniDFSCluster dfsCluster) {
MiniDFSCluster dfsCluster)
throws IOException {
this.conf = conf;
this.cluster = dfsCluster;
@@ -72,15 +76,16 @@
* @param miniHdfsFilesystem If true, set the hbase mini
* cluster atop a mini hdfs cluster. Otherwise, use the
* filesystem configured in <code>conf</code>.
* @throws IOException
*/
public MiniHBaseCluster(Configuration conf, int nRegionNodes,
final boolean miniHdfsFilesystem) {
final boolean miniHdfsFilesystem)
throws IOException {
this.conf = conf;
if (miniHdfsFilesystem) {
try {
this.cluster = new MiniDFSCluster(this.conf, 2, true, (String[])null);
} catch(Throwable t) {
LOG.error("Failed setup of mini dfs cluster", t);
t.printStackTrace();
@@ -90,14 +95,14 @@ public class MiniHBaseCluster implements HConstants {
init(nRegionNodes);
}
private void init(int nRegionNodes) {
private void init(int nRegionNodes) throws IOException {
try {
try {
this.fs = FileSystem.get(conf);
this.parentdir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
fs.mkdirs(parentdir);
} catch(Throwable e) {
} catch(IOException e) {
LOG.error("Failed setup of FileSystem", e);
throw e;
}
@@ -118,18 +123,17 @@ public class MiniHBaseCluster implements HConstants {
String address = master.getMasterAddress().toString();
this.conf.set(MASTER_ADDRESS, address);
// Start the HRegionServers
if(this.conf.get(REGIONSERVER_ADDRESS) == null) {
this.conf.set(REGIONSERVER_ADDRESS, "localhost:0");
// Start the HRegionServers. If > 1 region servers, need to set
// port to '0'.
if(this.conf.get(REGIONSERVER_ADDRESS) == null || nRegionNodes > 1) {
this.conf.set(REGIONSERVER_ADDRESS, DEFAULT_HOST + ":0");
}
LOG.info("Starting HRegionServers");
startRegionServers(this.conf, nRegionNodes);
} catch(Throwable e) {
e.printStackTrace();
} catch(IOException e) {
shutdown();
throw e;
}
}
@@ -183,12 +187,16 @@ public class MiniHBaseCluster implements HConstants {
public void shutdown() {
LOG.info("Shutting down the HBase Cluster");
for(int i = 0; i < regionServers.length; i++) {
if (regionServers[i] != null) {
regionServers[i].stop();
}
}
master.shutdown();
for(int i = 0; i < regionServers.length; i++) {
try {
if (regionThreads[i] != null) {
regionThreads[i].join();
}
} catch(InterruptedException e) {
// continue
}


@@ -28,32 +28,15 @@ public class TestCleanRegionServerExit extends HBaseClusterTestCase {
client = new HClient(conf);
}
/** The test */
public void testCleanRegionServerExit() {
try {
/** The test
* @throws IOException
* @throws InterruptedException */
public void testCleanRegionServerExit()
throws IOException, InterruptedException {
// When the META table can be opened, the region servers are running
client.openTable(HConstants.META_TABLE_NAME);
} catch(IOException e) {
e.printStackTrace();
fail();
}
// Shut down a region server cleanly
this.client.openTable(HConstants.META_TABLE_NAME);
this.cluster.stopRegionServer(0);
try {
this.cluster.regionThreads[0].join();
} catch(InterruptedException e) {
}
try {
Thread.sleep(60000); // Wait for cluster to adjust
} catch(InterruptedException e) {
}
}
}