HADOOP-1465 Add cluster stop/start scripts for hbase

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@547427 13f79535-47bb-0310-9956-ffa450edef68
Jim Kellerman 2007-06-14 22:08:56 +00:00
parent 13c1e48253
commit e78644ed10
16 changed files with 377 additions and 135 deletions

CHANGES.txt

@@ -32,3 +32,5 @@ Trunk (unreleased changes)
 17. HADOOP-1476 Distributed version of 'Performance Evaluation' script
 18. HADOOP-1469 Asynchronous table creation
 19. HADOOP-1415 Integrate BSD licensed bloom filter implementation.
+20. HADOOP-1465 Add cluster stop/start scripts for hbase

bin/hbase-config.sh (3 changes; Executable file → Normal file)

@@ -1,6 +1,7 @@
 # included in all the hbase scripts with source command
 # should not be executable directly
 # also should not be passed any arguments, since we need original $*
+# Modelled after $HADOOP_HOME/bin/hadoop-env.sh.

 # resolve links - $0 may be a softlink
@@ -42,7 +43,7 @@ do
       shift
       ;;
     --hosts=*)
-      regionservers=`echo $1|sed 's/[^=]*=\(.*\)/\1/'`
+      HBASE_REGIONSERVERS=`echo $1|sed 's/[^=]*=\(.*\)/\1/'`
      shift
      ;;
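
The rename matters because regionservers.sh (added below) reads HBASE_REGIONSERVERS for its host list. A hypothetical invocation, with the file path made up for illustration:

  # --hosts=<file> is rewritten by hbase-config.sh into HBASE_REGIONSERVERS
  bin/hbase-daemons.sh --hosts=/tmp/regionservers.test regionserver start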

bin/hbase-daemon.sh (new executable file, 120 lines)

@@ -0,0 +1,120 @@
#!/bin/sh
#
# Runs a Hadoop hbase command as a daemon.
#
# Environment Variables
#
#   HADOOP_CONF_DIR     Alternate conf dir. Default is ${HADOOP_HOME}/conf.
#   HBASE_CONF_DIR      Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
#   HADOOP_LOG_DIR      Where log files are stored. PWD by default.
#   HADOOP_PID_DIR      Where the pid files are stored. /tmp by default.
#   HADOOP_IDENT_STRING A string representing this instance of hadoop. $USER by default.
#   HADOOP_NICENESS     The scheduling priority for daemons. Defaults to 0.
#
# Modelled after $HADOOP_HOME/bin/hadoop-daemon.sh

usage="Usage: hbase-daemon.sh [--config=<hadoop-conf-dir>] [--hbaseconfig=<hbase-conf-dir>] <hbase-command> (start|stop) <args...>"

# if no args specified, show usage
if [ $# -le 1 ]; then
  echo $usage
  exit 1
fi

bin=`dirname "$0"`
bin=`cd "$bin"; pwd`

. "$bin"/hbase-config.sh

# get arguments
command=$1
shift
startStop=$1
shift

hbase_rotate_log ()
{
  log=$1;
  num=5;
  if [ -n "$2" ]; then
    num=$2
  fi
  if [ -f "$log" ]; then # rotate logs
    while [ $num -gt 1 ]; do
      prev=`expr $num - 1`
      [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
      num=$prev
    done
    mv "$log" "$log.$num";
  fi
}

if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi
if [ -f "${HBASE_CONF_DIR}/hbase-env.sh" ]; then
  . "${HBASE_CONF_DIR}/hbase-env.sh"
fi

# get log directory
if [ "$HADOOP_LOG_DIR" = "" ]; then
  export HADOOP_LOG_DIR="$HADOOP_HOME/logs"
fi
mkdir -p "$HADOOP_LOG_DIR"

if [ "$HADOOP_PID_DIR" = "" ]; then
  HADOOP_PID_DIR=/tmp
fi

if [ "$HADOOP_IDENT_STRING" = "" ]; then
  export HADOOP_IDENT_STRING="$USER"
fi

# some variables
export HADOOP_LOGFILE=hbase-$HADOOP_IDENT_STRING-$command-`hostname`.log
export HADOOP_ROOT_LOGGER="INFO,DRFA"
log=$HADOOP_LOG_DIR/hbase-$HADOOP_IDENT_STRING-$command-`hostname`.out
pid=$HADOOP_PID_DIR/hbase-$HADOOP_IDENT_STRING-$command.pid

# Set default scheduling priority
if [ "$HADOOP_NICENESS" = "" ]; then
  export HADOOP_NICENESS=0
fi

case $startStop in

  (start)
    if [ -f $pid ]; then
      if kill -0 `cat $pid` > /dev/null 2>&1; then
        echo $command running as process `cat $pid`. Stop it first.
        exit 1
      fi
    fi

    hbase_rotate_log $log
    echo starting $command, logging to $log
    nohup nice -n $HADOOP_NICENESS "$HBASE_HOME"/bin/hbase --config="${HADOOP_CONF_DIR}" --hbaseconfig="${HBASE_CONF_DIR}" $command $startStop "$@" > "$log" 2>&1 < /dev/null &
    echo $! > $pid
    sleep 1; head "$log"
    ;;

  (stop)
    if [ -f $pid ]; then
      if kill -0 `cat $pid` > /dev/null 2>&1; then
        echo stopping $command
        nohup nice -n $HADOOP_NICENESS "$HBASE_HOME"/bin/hbase --config="${HADOOP_CONF_DIR}" --hbaseconfig="${HBASE_CONF_DIR}" $command $startStop "$@" > "$log" 2>&1 < /dev/null &
      else
        echo no $command to stop
      fi
    else
      echo no $command to stop
    fi
    ;;

  (*)
    echo $usage
    exit 1
    ;;

esac
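
For reference, a typical start/stop cycle with this script might look like the following; paths are illustrative, and the --config/--hbaseconfig flags can be dropped when the default conf dirs apply:

  # start the master daemon; pid goes to $HADOOP_PID_DIR, output to $HADOOP_LOG_DIR
  bin/hbase-daemon.sh --config=${HADOOP_HOME}/conf --hbaseconfig=${HBASE_HOME}/conf master start
  # stop it again
  bin/hbase-daemon.sh --config=${HADOOP_HOME}/conf --hbaseconfig=${HBASE_HOME}/conf master stop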

bin/hbase-daemons.sh (new executable file, 19 lines)

@@ -0,0 +1,19 @@
#!/bin/sh
#
# Run a Hadoop hbase command on all slave hosts.
# Modelled after $HADOOP_HOME/bin/hadoop-daemons.sh

usage="Usage: hbase-daemons.sh [--config=<confdir>] [--hbaseconfig=<hbase-confdir>] [--hosts=regionserversfile] command [start|stop] args..."

# if no args specified, show usage
if [ $# -le 1 ]; then
  echo $usage
  exit 1
fi

bin=`dirname "$0"`
bin=`cd "$bin"; pwd`

. $bin/hbase-config.sh

exec "$bin/regionservers.sh" --config="${HADOOP_CONF_DIR}" --hbaseconfig="${HBASE_CONF_DIR}" cd "$HBASE_HOME" \; "$bin/hbase-daemon.sh" --config="${HADOOP_CONF_DIR}" --hbaseconfig="${HBASE_CONF_DIR}" "$@"
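
The exec line hands one compound command to regionservers.sh: the escaped \; keeps the cd and the hbase-daemon.sh invocation together as a single remote command line. On each listed host the effect is roughly the following sketch (hostname and install path are placeholders):

  ssh rs1.example.com 'cd /opt/hbase ; /opt/hbase/bin/hbase-daemon.sh --config=... --hbaseconfig=... regionserver start'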

bin/regionservers.sh (new executable file, 57 lines)

@@ -0,0 +1,57 @@
#!/bin/bash
#
# Run a shell command on all regionserver hosts.
#
# Environment Variables
#
#   HBASE_REGIONSERVERS File naming remote hosts.
#     Default is ${HADOOP_CONF_DIR}/regionservers
#   HADOOP_CONF_DIR     Alternate conf dir. Default is ${HADOOP_HOME}/conf.
#   HBASE_CONF_DIR      Alternate hbase conf dir. Default is ${HBASE_HOME}/conf.
#   HADOOP_SLAVE_SLEEP  Seconds to sleep between spawning remote commands.
#   HADOOP_SSH_OPTS     Options passed to ssh when running remote commands.
#
# Modelled after $HADOOP_HOME/bin/slaves.sh.

usage="Usage: regionservers [--config=<confdir>] [--hbaseconfig=<hbase-confdir>] command..."

# if no args specified, show usage
if [ $# -le 0 ]; then
  echo $usage
  exit 1
fi

bin=`dirname "$0"`
bin=`cd "$bin"; pwd`

. "$bin"/hbase-config.sh

# If the regionservers file is specified in the command line,
# then it takes precedence over the definition in
# hbase-env.sh. Save it here.
HOSTLIST=$HBASE_REGIONSERVERS

if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi
if [ -f "${HBASE_CONF_DIR}/hbase-env.sh" ]; then
  . "${HBASE_CONF_DIR}/hbase-env.sh"
fi

if [ "$HOSTLIST" = "" ]; then
  if [ "$HBASE_REGIONSERVERS" = "" ]; then
    export HOSTLIST="${HBASE_CONF_DIR}/regionservers"
  else
    export HOSTLIST="${HBASE_REGIONSERVERS}"
  fi
fi

for regionserver in `cat "$HOSTLIST"`; do
  ssh $HADOOP_SSH_OPTS $regionserver $"${@// /\\ }" \
    2>&1 | sed "s/^/$regionserver: /" &
  if [ "$HADOOP_SLAVE_SLEEP" != "" ]; then
    sleep $HADOOP_SLAVE_SLEEP
  fi
done

wait
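
The $"${@// /\\ }" expansion is bash pattern substitution over the positional parameters: every space inside an argument is backslash-escaped so the remote shell does not re-split it. A standalone demo of the substitution (hypothetical script, not part of the commit):

  #!/bin/bash
  # demo: show the space-escaping regionservers.sh applies before ssh
  set -- "hello world" second
  for arg in "${@// /\\ }"; do
    echo "escaped: $arg"
  done
  # prints:
  #   escaped: hello\ world
  #   escaped: second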

bin/start-hbase.sh (new executable file, 15 lines)

@@ -0,0 +1,15 @@
#!/bin/sh

# Modelled after $HADOOP_HOME/bin/start-hbase.sh.

# Start hadoop hbase daemons.
# Run this on master node.

usage="Usage: start-hbase.sh"

bin=`dirname "$0"`
bin=`cd "$bin"; pwd`

. "$bin"/hbase-config.sh

# start hbase daemons
"$bin"/hbase-daemon.sh --config="${HADOOP_CONF_DIR}" --hbaseconfig="${HBASE_CONF_DIR}" master start
"$bin"/hbase-daemons.sh --config="${HADOOP_CONF_DIR}" --hbaseconfig="${HBASE_CONF_DIR}" regionserver start

bin/stop-hbase.sh (new executable file, 11 lines)

@@ -0,0 +1,11 @@
#!/bin/sh

# Modelled after $HADOOP_HOME/bin/stop-hbase.sh.

# Stop hadoop hbase daemons. Run this on master node.

bin=`dirname "$0"`
bin=`cd "$bin"; pwd`

. "$bin"/hbase-config.sh

"$bin"/hbase-daemon.sh --config="${HADOOP_CONF_DIR}" --hbaseconfig="${HBASE_CONF_DIR}" master stop

conf/hbase-site.xml

@@ -3,14 +3,14 @@
 <configuration>
   <property>
     <name>hbase.master</name>
-    <value>localhost:60000</value>
+    <value>0.0.0.0:60000</value>
     <description>The host and port that the HBase master runs at.
     TODO: Support 'local' (All running in single context).
     </description>
   </property>
   <property>
     <name>hbase.regionserver</name>
-    <value>localhost:60010</value>
+    <value>0.0.0.0:60010</value>
     <description>The host and port a HBase region server runs at.
     </description>
   </property>

conf/hbase-env.sh (new file, 21 lines)

@@ -0,0 +1,21 @@
# Set HBase-specific environment variables here.

# The only required environment variable is JAVA_HOME. All others are
# optional. When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.

# The java implementation to use. Required.
# export JAVA_HOME=/usr/lib/j2sdk1.5-sun

# Extra Java CLASSPATH elements. Optional.
# export HBASE_CLASSPATH=

# The maximum amount of heap to use, in MB. Default is 1000.
# export HBASE_HEAPSIZE=1000

# Extra Java runtime options. Empty by default.
# export HBASE_OPTS=-server

# File naming remote slave hosts. ${HBASE_CONF_DIR}/regionservers by default.
# export HBASE_REGIONSERVERS=${HBASE_HOME}/conf/regionservers

HConstants.java

@@ -38,14 +38,16 @@ public interface HConstants {
   /** Parameter name for master address */
   static final String MASTER_ADDRESS = "hbase.master";

+  static final String DEFAULT_HOST = "0.0.0.0";
+
   /** Default master address */
-  static final String DEFAULT_MASTER_ADDRESS = "localhost:60000";
+  static final String DEFAULT_MASTER_ADDRESS = DEFAULT_HOST + ":60000";

   /** Parameter name for hbase.regionserver address. */
   static final String REGIONSERVER_ADDRESS = "hbase.regionserver";

   /** Default region server address */
-  static final String DEFAULT_REGIONSERVER_ADDRESS = "localhost:60010";
+  static final String DEFAULT_REGIONSERVER_ADDRESS = DEFAULT_HOST + ":60010";

   /** Parameter name for how often threads should wake up */
   static final String THREAD_WAKE_FREQUENCY = "hbase.server.thread.wakefrequency";

HMaster.java

@@ -818,16 +818,12 @@ public class HMaster implements HConstants, HMasterInterface,
   public void regionServerStartup(HServerInfo serverInfo) throws IOException {
     String s = serverInfo.getServerAddress().toString().trim();
     HServerInfo storedInfo = null;
-
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("received start message from: " + s);
-    }
+    LOG.info("received start message from: " + s);

     // If we get the startup message but there's an old server by that
     // name, then we can timeout the old one right away and register
     // the new one.
     storedInfo = serversToServerInfo.remove(s);
     if(storedInfo != null && !closed) {
       synchronized(msgQueue) {
         msgQueue.addLast(new PendingServerShutdown(storedInfo));
@@ -836,9 +832,7 @@ public class HMaster implements HConstants, HMasterInterface,
     }
-
     // Either way, record the new server
     serversToServerInfo.put(s, serverInfo);
-
     if(!closed) {
       Text serverLabel = new Text(s);
       LOG.debug("Created lease for " + serverLabel);
@@ -1101,11 +1095,8 @@ public class HMaster implements HConstants, HMasterInterface,
     }
-
     // Figure out what the RegionServer ought to do, and write back.
     if(unassignedRegions.size() > 0) {
-
       // Open new regions as necessary
       int targetForServer = (int) Math.ceil(unassignedRegions.size()
           / (1.0 * serversToServerInfo.size()));

HRegion.java

@@ -241,8 +241,8 @@ class HRegion implements HConstants {
   TreeMap<Text, Long> rowsToLocks = new TreeMap<Text, Long>();
   TreeMap<Long, Text> locksToRows = new TreeMap<Long, Text>();
   TreeMap<Text, HStore> stores = new TreeMap<Text, HStore>();
-  TreeMap<Long, TreeMap<Text, BytesWritable>> targetColumns
-    = new TreeMap<Long, TreeMap<Text, BytesWritable>>();
+  Map<Long, TreeMap<Text, BytesWritable>> targetColumns
+    = new HashMap<Long, TreeMap<Text, BytesWritable>>();

   HMemcache memcache;
@@ -1068,7 +1068,7 @@ class HRegion implements HConstants {
     checkColumn(targetCol);

     Text row = getRowFromLock(lockid);
-    if(row == null) {
+    if (row == null) {
       throw new LockException("No write lock for lockid " + lockid);
     }
@@ -1078,15 +1078,15 @@ class HRegion implements HConstants {
     synchronized(row) {
       // This check makes sure that another thread from the client
       // hasn't aborted/committed the write-operation.
-      if(row != getRowFromLock(lockid)) {
+      if (row != getRowFromLock(lockid)) {
         throw new LockException("Locking error: put operation on lock " +
             lockid + " unexpected aborted by another thread");
       }

-      TreeMap<Text, BytesWritable> targets = targetColumns.get(lockid);
-      if(targets == null) {
+      TreeMap<Text, BytesWritable> targets = this.targetColumns.get(lockid);
+      if (targets == null) {
         targets = new TreeMap<Text, BytesWritable>();
-        targetColumns.put(lockid, targets);
+        this.targetColumns.put(lockid, targets);
       }
       targets.put(targetCol, val);
     }
@@ -1117,7 +1117,7 @@ class HRegion implements HConstants {
             + lockid + " unexpected aborted by another thread");
       }

-      targetColumns.remove(lockid);
+      this.targetColumns.remove(lockid);
       releaseRowLock(row);
     }
   }
@@ -1144,12 +1144,15 @@ class HRegion implements HConstants {
     synchronized(row) {
       // Add updates to the log and add values to the memcache.
       long commitTimestamp = System.currentTimeMillis();
-      log.append(regionInfo.regionName, regionInfo.tableDesc.getName(), row,
-        targetColumns.get(Long.valueOf(lockid)), commitTimestamp);
-      memcache.add(row, targetColumns.get(Long.valueOf(lockid)),
-        commitTimestamp);
+      TreeMap<Text, BytesWritable> columns =
+        this.targetColumns.get(lockid);
+      if (columns != null && columns.size() > 0) {
+        log.append(regionInfo.regionName, regionInfo.tableDesc.getName(),
+          row, columns, commitTimestamp);
+        memcache.add(row, columns, commitTimestamp);
       // OK, all done!
-      targetColumns.remove(Long.valueOf(lockid));
+      }
+      this.targetColumns.remove(lockid);
       releaseRowLock(row);
     }
     recentCommits++;
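
A side note on the lockid handling above: with Java 5 autoboxing, a primitive long key boxes to Long automatically, which is why the explicit Long.valueOf(lockid) calls could be dropped. A standalone sketch of the pattern (hypothetical class, not part of the commit):

  import java.util.HashMap;
  import java.util.Map;

  public class AutoboxDemo {
    public static void main(String[] args) {
      Map<Long, String> byLock = new HashMap<Long, String>();
      long lockid = 42L;
      byLock.put(lockid, "row1");             // boxes to Long.valueOf(42L)
      System.out.println(byLock.get(lockid)); // prints "row1"
    }
  }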

HRegionServer.java

@@ -16,6 +16,7 @@
 package org.apache.hadoop.hbase;

 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -37,6 +38,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.util.StringUtils;

 /*******************************************************************************
@@ -59,29 +61,29 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   static final Log LOG = LogFactory.getLog(HRegionServer.class);

-  volatile boolean stopRequested;
-  volatile boolean abortRequested;
-  private Path rootDir;
-  HServerInfo info;
-  Configuration conf;
-  private Random rand;
+  protected volatile boolean stopRequested;
+  protected volatile boolean abortRequested;
+  private final Path rootDir;
+  protected final HServerInfo serverInfo;
+  protected final Configuration conf;
+  private final Random rand;

   // region name -> HRegion
-  SortedMap<Text, HRegion> onlineRegions;
-  Map<Text, HRegion> retiringRegions = new HashMap<Text, HRegion>();
+  protected final SortedMap<Text, HRegion> onlineRegions;
+  protected final Map<Text, HRegion> retiringRegions = new HashMap<Text, HRegion>();

-  final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
-  private Vector<HMsg> outboundMsgs;
+  protected final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+  private final Vector<HMsg> outboundMsgs;

   int numRetries;
-  long threadWakeFrequency;
-  private long msgInterval;
+  protected final long threadWakeFrequency;
+  private final long msgInterval;

   // Check to see if regions should be split
-  long splitOrCompactCheckFrequency;
-  private SplitOrCompactChecker splitOrCompactChecker;
-  private Thread splitOrCompactCheckerThread;
-  Integer splitOrCompactLock = Integer.valueOf(0);
+  protected final long splitOrCompactCheckFrequency;
+  private final SplitOrCompactChecker splitOrCompactChecker;
+  private final Thread splitOrCompactCheckerThread;
+  protected final Integer splitOrCompactLock = new Integer(0);

   /**
    * Interface used by the {@link org.apache.hadoop.io.retry} mechanism.
@@ -211,7 +213,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
           region.getRegionName());
       for (int i = 0; i < newRegions.length; i++) {
         HRegion.addRegionToMETA(client, tableToUpdate, newRegions[i],
-          info.getServerAddress(), info.getStartCode());
+          serverInfo.getServerAddress(), serverInfo.getStartCode());
       }

       // Now tell the master about the new regions
@@ -247,9 +249,9 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   }

   // Cache flushing
-  private Flusher cacheFlusher;
-  private Thread cacheFlusherThread;
-  Integer cacheFlusherLock = Integer.valueOf(0);
+  private final Flusher cacheFlusher;
+  private final Thread cacheFlusherThread;
+  protected final Integer cacheFlusherLock = new Integer(0);

   /** Runs periodically to flush the memcache */
   class Flusher implements Runnable {
     /* (non-Javadoc)
@@ -308,10 +310,10 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {

   // Logging
-  HLog log;
-  private LogRoller logRoller;
-  private Thread logRollerThread;
-  Integer logRollerLock = Integer.valueOf(0);
+  protected final HLog log;
+  private final LogRoller logRoller;
+  private final Thread logRollerThread;
+  protected final Integer logRollerLock = new Integer(0);

   /** Runs periodically to determine if the log should be rolled */
   class LogRoller implements Runnable {
@@ -369,7 +371,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
    */
   public HRegionServer(Configuration conf) throws IOException {
     this(new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR)),
-      new HServerAddress(conf.get(REGIONSERVER_ADDRESS, "localhost:0")),
+      new HServerAddress(conf.get(REGIONSERVER_ADDRESS,
+        DEFAULT_REGIONSERVER_ADDRESS)),
       conf);
   }
@@ -420,28 +423,33 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     try {
       // Server to handle client requests
       this.server = RPC.getServer(this, address.getBindAddress(),
         address.getPort(), conf.getInt("hbase.regionserver.handler.count", 10),
         false, conf);

-      this.info = new HServerInfo(new HServerAddress(server.getListenerAddress()),
+      // Use configured nameserver & interface to get local hostname.
+      // 'serverInfo' is sent to master. Should have name of this host rather than
+      // 'localhost' or 0.0.0.0 or 127.0.0.1 in it.
+      String localHostname = DNS.getDefaultHost(
+        conf.get("dfs.datanode.dns.interface","default"),
+        conf.get("dfs.datanode.dns.nameserver","default"));
+      InetSocketAddress hostnameAddress = new InetSocketAddress(localHostname,
+        server.getListenerAddress().getPort());
+      this.serverInfo = new HServerInfo(new HServerAddress(hostnameAddress),
         this.rand.nextLong());

       // Local file paths
-      String serverName =
-        this.info.getServerAddress().getBindAddress() + "_"
-        + this.info.getServerAddress().getPort();
+      String serverName = localHostname + "_" +
+        this.serverInfo.getServerAddress().getPort();
       Path logdir = new Path(rootDir, "log" + "_" + serverName);

       // Logging
       this.fs = FileSystem.get(conf);
       if(fs.exists(logdir)) {
-        throw new RegionServerRunningException("region server already running at "
-          + this.info.getServerAddress().toString());
+        throw new RegionServerRunningException("region server already running at " +
+          this.serverInfo.getServerAddress().toString() + " because logdir " +
+          " exists");
       }

       this.log = new HLog(fs, logdir, conf);
@@ -449,12 +457,10 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       this.logRollerThread = new Thread(logRoller);

       // Remote HMaster
       this.hbaseMaster = (HMasterRegionInterface)RPC.waitForProxy(
         HMasterRegionInterface.class, HMasterRegionInterface.versionID,
         new HServerAddress(conf.get(MASTER_ADDRESS)).getInetSocketAddress(),
         conf);
     } catch(IOException e) {
       this.stopRequested = true;
       throw e;
@@ -512,7 +518,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
         // continue
       }
     }
     LOG.info("HRegionServer stopped at: " +
-      info.getServerAddress().toString());
+      serverInfo.getServerAddress().toString());
   }

   /**
@@ -541,8 +547,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     try {
       this.server.start();
-      LOG.info("HRegionServer started at: " + info.getServerAddress().toString());
+      LOG.info("HRegionServer started at: " + serverInfo.getServerAddress().toString());
     } catch(IOException e) {
       LOG.error(e);
       stopRequested = true;
@@ -558,7 +563,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
         LOG.debug("Telling master we are up");
       }
-      hbaseMaster.regionServerStartup(info);
+      hbaseMaster.regionServerStartup(serverInfo);
       if (LOG.isDebugEnabled()) {
         LOG.debug("Done telling master we are up");
@@ -590,7 +595,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       }
       try {
-        HMsg msgs[] = hbaseMaster.regionServerReport(info, outboundArray);
+        HMsg msgs[] = hbaseMaster.regionServerReport(serverInfo, outboundArray);
         lastMsg = System.currentTimeMillis();

         // Queue up the HMaster's instruction stream for processing
@@ -679,7 +684,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       } catch(IOException e) {
         LOG.warn(e);
       }
-      LOG.info("aborting server at: " + info.getServerAddress().toString());
+      LOG.info("aborting server at: " + serverInfo.getServerAddress().toString());
     } else {
       Vector<HRegion> closedRegions = closeAllRegions();
@@ -701,14 +706,14 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
         }
         LOG.info("telling master that region server is shutting down at: "
-          +info.getServerAddress().toString());
-        hbaseMaster.regionServerReport(info, exitMsg);
+          + serverInfo.getServerAddress().toString());
+        hbaseMaster.regionServerReport(serverInfo, exitMsg);
       } catch(IOException e) {
         LOG.warn(e);
       }
     }
-    LOG.info("stopping server at: " + info.getServerAddress().toString());
+    LOG.info("stopping server at: " + serverInfo.getServerAddress().toString());
   }

   join();
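
The DNS lookup is the key piece: with the bind address now defaulting to 0.0.0.0, the name advertised to the master must come from somewhere else. A minimal sketch of the same lookup, assuming the identical dfs.datanode.dns.* keys (hypothetical class, not part of the commit):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.net.DNS;

  public class HostnameProbe {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Resolve the name this host would report to the master,
      // instead of 'localhost', 127.0.0.1, or 0.0.0.0.
      String host = DNS.getDefaultHost(
          conf.get("dfs.datanode.dns.interface", "default"),
          conf.get("dfs.datanode.dns.nameserver", "default"));
      System.out.println("advertised hostname: " + host);
    }
  }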

package.html (hbase package overview)

@@ -17,28 +17,32 @@ Set <code>JAVA_HOME</code> to the root of your Java installation</li>
 <h2>Getting Started</h2>
 <p>First, you need a working instance of Hadoop. Download a recent release from
 <a href="http://www.apache.org/dyn/closer.cgi/lucene/hadoop/">Hadoop downloads</a>.
-Unpack the release and connect to its top-level directory. Edit the file
-<code>conf/hadoop-env.sh</code> to define at least <code>JAVA_HOME</code>. Also,
-add site-particular customizations to the file <code>conf/hadoop-site.xml</code>.
-Try the following command:
-<pre>bin/hadoop
+Unpack the release and connect to its top-level directory. Let this be
+<code>${HADOOP_HOME}</code>. Edit the file <code>${HADOOP_HOME}/conf/hadoop-env.sh</code>
+to define at least <code>JAVA_HOME</code>. Also, add site-particular
+customizations to the file <code>${HADOOP_HOME}/conf/hadoop-site.xml</code>.
+Try the following command: <pre>bin/hadoop
 </pre>
 This will display the documentation for the Hadoop command script.
 </p>
-<p>Next, start hbase servers. Currently each server -- the master server and the
-'slave' regionservers -- must be started manually (FIX).
-<pre>src/contrib/hbase/bin/hbase master start
-src/contrib/hbase/bin/hbase regionserver start
-</pre>
-</p>
-<p>As for hadoop, local customizations can be added to
-<code>src/contrib/hbase/conf/hbase-site.xml</code>.
+<p>Next, change to the hbase root. Let this be <code>${HBASE_HOME}</code>. It is
+usually located at <code>${HADOOP_HOME}/src/contrib/hbase</code>. Configure hbase.
+Edit <code>${HBASE_HOME}/conf/hbase-env.sh</code> and
+<code>${HBASE_HOME}/conf/hbase-site.xml</code> to make site-particular settings.
+List the hosts running regionservers in <code>${HBASE_HOME}/conf/regionservers</code>.
+</p>
+<p>
+Here is how to start and then stop hbase:
+<pre>${HBASE_HOME}/bin/start-hbase.sh
+${HBASE_HOME}/bin/stop-hbase.sh
+</pre>
+Logs can be found in ${HADOOP_LOG_DIR}.
 </p>

 <h2>Related Documentation</h2>
 <ul>
-<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase/HbaseArchitecture">Hbase/HbaseArchitecture</a>
+<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase">HBase Home Page</a>
+<li><a href="http://wiki.apache.org/lucene-hadoop/Hbase/HbaseArchitecture">Hbase Architecture</a>
 </ul>

 </body>

MiniHBaseCluster.java

@@ -45,8 +45,10 @@ public class MiniHBaseCluster implements HConstants {
    *
    * @param conf
    * @param nRegionNodes
+   * @throws IOException
    */
-  public MiniHBaseCluster(Configuration conf, int nRegionNodes) {
+  public MiniHBaseCluster(Configuration conf, int nRegionNodes)
+  throws IOException {
     this(conf, nRegionNodes, true);
   }
@@ -56,9 +58,11 @@ public class MiniHBaseCluster implements HConstants {
    * @param conf
    * @param nRegionNodes
    * @param dfsCluster
+   * @throws IOException
    */
   public MiniHBaseCluster(Configuration conf, int nRegionNodes,
-      MiniDFSCluster dfsCluster) {
+      MiniDFSCluster dfsCluster)
+  throws IOException {
     this.conf = conf;
     this.cluster = dfsCluster;
@@ -72,15 +76,16 @@ public class MiniHBaseCluster implements HConstants {
    * @param miniHdfsFilesystem If true, set the hbase mini
    * cluster atop a mini hdfs cluster.  Otherwise, use the
    * filesystem configured in <code>conf</code>.
+   * @throws IOException
    */
   public MiniHBaseCluster(Configuration conf, int nRegionNodes,
-      final boolean miniHdfsFilesystem) {
+      final boolean miniHdfsFilesystem)
+  throws IOException {
     this.conf = conf;

     if (miniHdfsFilesystem) {
       try {
         this.cluster = new MiniDFSCluster(this.conf, 2, true, (String[])null);
       } catch(Throwable t) {
         LOG.error("Failed setup of mini dfs cluster", t);
         t.printStackTrace();
@@ -90,14 +95,14 @@ public class MiniHBaseCluster implements HConstants {
     init(nRegionNodes);
   }

-  private void init(int nRegionNodes) {
+  private void init(int nRegionNodes) throws IOException {
     try {
       try {
         this.fs = FileSystem.get(conf);
         this.parentdir = new Path(conf.get(HBASE_DIR, DEFAULT_HBASE_DIR));
         fs.mkdirs(parentdir);
-      } catch(Throwable e) {
+      } catch(IOException e) {
         LOG.error("Failed setup of FileSystem", e);
         throw e;
       }
@@ -118,18 +123,17 @@ public class MiniHBaseCluster implements HConstants {
       String address = master.getMasterAddress().toString();
       this.conf.set(MASTER_ADDRESS, address);

-      // Start the HRegionServers
-      if(this.conf.get(REGIONSERVER_ADDRESS) == null) {
-        this.conf.set(REGIONSERVER_ADDRESS, "localhost:0");
+      // Start the HRegionServers. If > 1 region servers, need to set
+      // port to '0'.
+      if(this.conf.get(REGIONSERVER_ADDRESS) == null || nRegionNodes > 1) {
+        this.conf.set(REGIONSERVER_ADDRESS, DEFAULT_HOST + ":0");
       }

       LOG.info("Starting HRegionServers");
       startRegionServers(this.conf, nRegionNodes);
-    } catch(Throwable e) {
-      e.printStackTrace();
+    } catch(IOException e) {
       shutdown();
+      throw e;
     }
   }
@@ -183,12 +187,16 @@ public class MiniHBaseCluster implements HConstants {
   public void shutdown() {
     LOG.info("Shutting down the HBase Cluster");
     for(int i = 0; i < regionServers.length; i++) {
+      if (regionServers[i] != null) {
         regionServers[i].stop();
+      }
     }
     master.shutdown();
     for(int i = 0; i < regionServers.length; i++) {
       try {
+        if (regionThreads[i] != null) {
           regionThreads[i].join();
+        }
       } catch(InterruptedException e) {
         // continue
       }
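
With the constructors now declaring IOException, callers can let setup failures propagate instead of swallowing them. A hypothetical JUnit-3-style usage sketch (class name and body made up):

  import java.io.IOException;
  import junit.framework.TestCase;
  import org.apache.hadoop.conf.Configuration;

  public class TestMiniClusterStartup extends TestCase {
    public void testStartStop() throws IOException {
      Configuration conf = new Configuration();
      // A failed dfs/hbase setup now fails the test directly.
      MiniHBaseCluster cluster = new MiniHBaseCluster(conf, 1);
      try {
        // ... exercise the cluster ...
      } finally {
        cluster.shutdown();
      }
    }
  }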

TestCleanRegionServerExit.java

@@ -28,32 +28,15 @@ public class TestCleanRegionServerExit extends HBaseClusterTestCase {
     client = new HClient(conf);
   }

-  /** The test */
-  public void testCleanRegionServerExit() {
-    try {
+  /** The test
+   * @throws IOException
+   * @throws InterruptedException */
+  public void testCleanRegionServerExit()
+  throws IOException, InterruptedException {
     // When the META table can be opened, the region servers are running
-      client.openTable(HConstants.META_TABLE_NAME);
-    } catch(IOException e) {
-      e.printStackTrace();
-      fail();
-    }
+    this.client.openTable(HConstants.META_TABLE_NAME);
+    // Shut down a region server cleanly
     this.cluster.stopRegionServer(0);
-    try {
     this.cluster.regionThreads[0].join();
-    } catch(InterruptedException e) {
-    }
-    try {
     Thread.sleep(60000); // Wait for cluster to adjust
-    } catch(InterruptedException e) {
-    }
   }
 }