HBASE-1511 Pseudo distributed mode in LocalHBaseCluster

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@965672 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2010-07-19 22:59:51 +00:00
parent 6351d3c964
commit b6282f5508
8 changed files with 295 additions and 33 deletions

CHANGES.txt

@@ -440,6 +440,9 @@ Release 0.21.0 - Unreleased
               edits ordered by sequenceid
    HBASE-2843 Readd bloomfilter test over zealously removed by HBASE-2625
    HBASE-2846 Make rest server be same as thrift and avro servers
+   HBASE-1511 Pseudo distributed mode in LocalHBaseCluster
+              (Nicolas Spiegelberg via Stack)
 
   IMPROVEMENTS
    HBASE-1760 Cleanup TODOs in HTable

bin/local-master-backup.sh (new file)

@@ -0,0 +1,35 @@
#!/bin/sh
# This is used for starting multiple masters on the same machine.
# run it from hbase-dir/ just like 'bin/hbase'
# Supports up to 10 masters (limitation = overlapping ports)
bin=`dirname "$0"`
bin=`cd "$bin" >/dev/null && pwd`
if [ $# -lt 2 ]; then
S=`basename $0`
echo "Usage: $S [start|stop] offset(s)"
echo ""
echo " e.g. $S start 1"
exit
fi
# sanity check: make sure your master opts don't use ports [e.g. JMX/DBG]
export HBASE_MASTER_OPTS=" "
run_master () {
DN=$2
export HBASE_IDENT_STRING="$USER-$DN"
HBASE_MASTER_ARGS="\
-D hbase.master.port=`expr 60000 + $DN` \
-D hbase.master.info.port=`expr 60010 + $DN`"
"$bin"/hbase-daemon.sh $1 master $HBASE_MASTER_ARGS
}
cmd=$1
shift;
for i in $*
do
run_master $cmd $i
done
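
A minimal way to exercise the script above (the jps check is an extra, not part of this commit; the pid file path matches what hbase-daemon.sh writes and what the pseudo-distributed doc below relies on):

    # Start a backup master at offset 1 (ports 60001/60011), confirm it is up,
    # then kill just that process via its pid file rather than using 'stop'.
    bin/local-master-backup.sh start 1
    jps | grep HMaster
    cat /tmp/hbase-${USER}-1-master.pid | xargs kill -9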

bin/local-regionservers.sh (new file)

@@ -0,0 +1,35 @@
#!/bin/sh
# This is used for starting multiple regionservers on the same machine.
# run it from hbase-dir/ just like 'bin/hbase'
# Supports up to 100 regionservers (limitation = overlapping ports)
bin=`dirname "$0"`
bin=`cd "$bin" >/dev/null && pwd`
if [ $# -lt 2 ]; then
S=`basename $0`
echo "Usage: $S [start|stop] offset(s)"
echo ""
echo " e.g. $S start 1 2"
exit
fi
# sanity check: make sure your regionserver opts don't use ports [e.g. JMX/DBG]
export HBASE_REGIONSERVER_OPTS=" "
run_regionserver () {
DN=$2
export HBASE_IDENT_STRING="$USER-$DN"
HBASE_REGIONSERVER_ARGS="\
-D hbase.regionserver.port=`expr 60200 + $DN` \
-D hbase.regionserver.info.port=`expr 60300 + $DN`"
"$bin"/hbase-daemon.sh $1 regionserver $HBASE_REGIONSERVER_ARGS
}
cmd=$1
shift;
for i in $*
do
run_regionserver $cmd $i
done
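
Likewise for the regionserver script above, a quick assumed smoke test:

    # Bring up two extra regionservers at offsets 1 and 2
    # (ports 60201/60301 and 60202/60302), then stop offset 2 again.
    bin/local-regionservers.sh start 1 2
    jps | grep HRegionServer
    bin/local-regionservers.sh stop 2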

conf/hbase-site.xml.psuedo-distributed.template (new file)

@@ -0,0 +1,77 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
* Copyright 2009 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-->
<configuration>
<!-- NEEDED WHETHER OR NOT YOU ARE RUNNING OVER HDFS -->
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
<description>For pseudo-distributed, you want to set this to true.
false means that HBase tries to put Master + RegionServers in one process.
Pseudo-distributed = separate processes/pids</description>
</property>
<property>
<name>hbase.regionserver.hlog.replication</name>
<value>1</value>
<description>For HBase to offer good data durability, we roll logs if
filesystem replication falls below a certain amount. In pseudo-distributed
mode, you normally only have the local filesystem or 1 HDFS DataNode, so you
don't want to roll logs constantly.</description>
</property>
<property>
<name>hbase.tmp.dir</name>
<value>/tmp/hbase-testing</value>
<description>Temporary directory on the local filesystem.</description>
</property>
<!-- DEFAULT = use local filesystem, not HDFS
ADD THESE LINES if you have a copy of HDFS source and want to run HBase
pseudo-distributed over a pseudo-distributed HDFS cluster.
For HDFS pseudo-distributed setup, see the Hadoop documentation:
http://hadoop.apache.org/common/docs/r0.20.2/quickstart.html#PseudoDistributed
<property>
<name>hbase.rootdir</name>
<value>hdfs://localhost:9000/hbase-testing</value>
<description>The directory shared by region servers.
Should be fully-qualified to include the filesystem to use.
E.g: hdfs://NAMENODE_SERVER:PORT/HBASE_ROOTDIR
</description>
</property>
-->
<!-- OPTIONAL: You might want to add these options depending upon your use case
<property>
<name>dfs.support.append</name>
<value>true</value>
<description>Allow append support (if you want to test data durability with HDFS)
</description>
</property>
-->
</configuration>
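
One assumed end-to-end check for the template above on a fresh checkout (the cp line mirrors the pseudo-distributed doc added below; the jps check is illustrative):

    # Install the template as the active config, start HBase, and confirm that
    # the Master and RegionServer run as separate processes (pseudo-distributed).
    cp conf/hbase-site.xml.psuedo-distributed.template conf/hbase-site.xml
    bin/start-hbase.sh
    jps | egrep 'HMaster|HRegionServer'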

HMaster.java

@@ -37,6 +37,10 @@ import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -1199,6 +1203,7 @@ public class HMaster extends Thread implements HMasterInterface,
     System.err.println(" stop  Start cluster shutdown; Master signals RegionServer shutdown");
     System.err.println(" where [opts] are:");
     System.err.println("   --minServers=<servers>  Minimum RegionServers needed to host user tables.");
+    System.err.println("   -D opt=<value>          Override HBase configuration settings.");
     System.exit(0);
   }
@@ -1250,20 +1255,34 @@ public class HMaster extends Thread implements HMasterInterface,
   protected static void doMain(String [] args,
       Class<? extends HMaster> masterClass) {
-    if (args.length < 1) {
-      printUsageAndExit();
-    }
     Configuration conf = HBaseConfiguration.create();
-    // Process command-line args.
-    for (String cmd: args) {
-      if (cmd.startsWith("--minServers=")) {
+
+    Options opt = new Options();
+    opt.addOption("minServers", true, "Minimum RegionServers needed to host user tables");
+    opt.addOption("D", true, "Override HBase Configuration Settings");
+    try {
+      CommandLine cmd = new GnuParser().parse(opt, args);
+
+      if (cmd.hasOption("minServers")) {
+        String val = cmd.getOptionValue("minServers");
         conf.setInt("hbase.regions.server.count.min",
-          Integer.valueOf(cmd.substring(13)));
-        continue;
+          Integer.valueOf(val));
+        LOG.debug("minServers set to " + val);
       }
-      if (cmd.equalsIgnoreCase("start")) {
+
+      if (cmd.hasOption("D")) {
+        for (String confOpt : cmd.getOptionValues("D")) {
+          String[] kv = confOpt.split("=", 2);
+          if (kv.length == 2) {
+            conf.set(kv[0], kv[1]);
+            LOG.debug("-D configuration override: " + kv[0] + "=" + kv[1]);
+          } else {
+            throw new ParseException("-D option format invalid: " + confOpt);
+          }
+        }
+      }
+
+      if (cmd.getArgList().contains("start")) {
         try {
           // Print out vm stats before starting up.
           RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
@@ -1312,10 +1331,7 @@ public class HMaster extends Thread implements HMasterInterface,
           LOG.error("Failed to start master", t);
           System.exit(-1);
         }
-        break;
-      }
-      if (cmd.equalsIgnoreCase("stop")) {
+      } else if (cmd.getArgList().contains("stop")) {
         HBaseAdmin adm = null;
         try {
           adm = new HBaseAdmin(conf);
@@ -1329,10 +1345,12 @@ public class HMaster extends Thread implements HMasterInterface,
           LOG.error("Failed to stop master", t);
           System.exit(-1);
         }
-        break;
+      } else {
+        throw new ParseException("Unknown argument(s): " +
+          org.apache.commons.lang.StringUtils.join(cmd.getArgs(), " "));
       }
-    }
-    // Print out usage if we get to here.
-    printUsageAndExit();
+    } catch (ParseException e) {
+      LOG.error("Could not parse: ", e);
+      printUsageAndExit();
     }
   }
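
For reference, the command-line surface this change gives HMaster; the invocations below are assumed (they mirror what local-master-backup.sh passes through hbase-daemon.sh), and GnuParser lets the options come before or after the start/stop verb:

    # -D pairs are applied on top of hbase-site.xml; --minServers maps to the
    # hbase.regions.server.count.min setting; unknown verbs print usage.
    bin/hbase master start -D hbase.master.port=60001 -D hbase.master.info.port=60011
    bin/hbase master --minServers=2 start
    bin/hbase master bounce   # fails with "Unknown argument(s): bounce" and prints usage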

HRegionServer.java

@@ -50,6 +50,10 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -2433,7 +2437,7 @@ public class HRegionServer implements HRegionInterface,
     if (message != null) {
       System.err.println(message);
     }
-    System.err.println("Usage: java org.apache.hbase.HRegionServer start|stop");
+    System.err.println("Usage: java org.apache.hbase.HRegionServer start|stop [-D <conf.param=value>]");
     System.exit(0);
   }
@@ -2467,15 +2471,26 @@
    */
   protected static void doMain(final String [] args,
       final Class<? extends HRegionServer> regionServerClass) {
-    if (args.length < 1) {
-      printUsageAndExit();
-    }
     Configuration conf = HBaseConfiguration.create();
-    // Process command-line args. TODO: Better cmd-line processing
-    // (but hopefully something not as painful as cli options).
-    for (String cmd: args) {
-      if (cmd.equals("start")) {
+
+    Options opt = new Options();
+    opt.addOption("D", true, "Override HBase Configuration Settings");
+    try {
+      CommandLine cmd = new GnuParser().parse(opt, args);
+
+      if (cmd.hasOption("D")) {
+        for (String confOpt : cmd.getOptionValues("D")) {
+          String[] kv = confOpt.split("=", 2);
+          if (kv.length == 2) {
+            conf.set(kv[0], kv[1]);
+            LOG.debug("-D configuration override: " + kv[0] + "=" + kv[1]);
+          } else {
+            throw new ParseException("-D option format invalid: " + confOpt);
+          }
+        }
+      }
+
+      if (cmd.getArgList().contains("start")) {
         try {
           // If 'local', don't start a region server here. Defer to
           // LocalHBaseCluster. It manages 'local' clusters.
@@ -2493,17 +2508,18 @@
         } catch (Throwable t) {
           LOG.error( "Can not start region server because "+
             StringUtils.stringifyException(t) );
+          System.exit(-1);
         }
-        break;
-      }
-      if (cmd.equals("stop")) {
-        printUsageAndExit("To shutdown the regionserver run " +
+      } else if (cmd.getArgList().contains("stop")) {
+        throw new ParseException("To shutdown the regionserver run " +
           "bin/hbase-daemon.sh stop regionserver or send a kill signal to" +
           "the regionserver pid");
+      } else {
+        throw new ParseException("Unknown argument(s): " +
+          org.apache.commons.lang.StringUtils.join(cmd.getArgs(), " "));
       }
-    }
-    // Print out usage if we get to here.
-    printUsageAndExit();
+    } catch (ParseException e) {
+      LOG.error("Could not parse", e);
+      printUsageAndExit();
     }
   }
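
The regionserver gets the same -D plumbing, which is what lets local-regionservers.sh above assign per-instance ports; a hand-run equivalent of offset 1 would look roughly like this (assumed invocation):

    # Foreground regionserver on non-default ports. Note that 'stop' is now
    # rejected here; use bin/hbase-daemon.sh stop regionserver (or the pid) instead.
    bin/hbase regionserver start \
      -D hbase.regionserver.port=60201 \
      -D hbase.regionserver.info.port=60301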

site.xml

@@ -36,6 +36,7 @@
       <item name="Metrics" href="metrics.html" />
       <item name="HBase on Windows" href="cygwin.html" />
       <item name="Cluster replication" href="replication.html" />
+      <item name="Pseudo-Distributed HBase" href="pseudo-distributed.html" />
     </menu>
   </body>
   <skin>

pseudo-distributed.xml (new file)

@@ -0,0 +1,77 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright 2010 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!DOCTYPE document PUBLIC "-//APACHE//DTD Documentation V2.0//EN"
"http://forrest.apache.org/dtd/document-v20.dtd">
<document xmlns="http://maven.apache.org/XDOC/2.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
<properties>
<title>
Running HBase in pseudo-distributed mode
</title>
</properties>
<body>
<p>This document augments what is described in the HBase 'Getting Started' in the
<a href="http://hbase.apache.org/docs/current/api/overview-summary.html#distributed">Distributed Operation: Pseudo- and Fully-distributed modes</a> section.
In particular, it describes scripts that allow you to start extra masters and regionservers when running in pseudo-distributed mode.
</p>
<ol><li>Copy the suggested pseudo-distributed configuration file (feel free to take a peek and understand what it's doing)
<source>% cp conf/hbase-site.xml{.psuedo-distributed.template,}</source>
</li>
<li>(Optional) Start up <a href="http://hadoop.apache.org/common/docs/r0.20.2/quickstart.html#PseudoDistributed">Pseudo-distributed HDFS</a>.
<ol><li>If you do, go to conf/hbase-site.xml. Uncomment the 'hbase.rootdir' property.
</li>
<li>Additionally, if you want to test HBase with high data durability enabled, also uncomment the 'dfs.support.append' property.
</li>
</ol>
</li>
<li>Start up the initial HBase cluster
<source>% bin/start-hbase.sh</source>
<ol> <li>To start an extra backup master on the same server, run
<source>% bin/local-master-backup.sh start 1</source>
Here the '1' means use ports 60001 &amp; 60011, and this backup master's logfile will be at <i>logs/hbase-${USER}-1-master-${HOSTNAME}.log</i>.
To start multiple backup masters, run <source>% bin/local-master-backup.sh start 2 3</source> You can start up to 9 backup masters (10 total).
</li>
<li>To start more regionservers, run
<source>% bin/local-regionservers.sh start 1</source>
where '1' means use ports 60201 &amp; 60301 and its logfile will be at <i>logs/hbase-${USER}-1-regionserver-${HOSTNAME}.log</i>.
To add 4 more regionservers in addition to the one you just started, run <source>% bin/local-regionservers.sh start 2 3 4 5</source>
The script supports up to 99 extra regionservers (100 total).
</li>
</ol>
</li>
<li>To stop the cluster
<ol>
<li>Assuming you want to stop backup master # 1, run
<source>% cat /tmp/hbase-${USER}-1-master.pid |xargs kill -9</source>
Note that bin/local-master-backup.sh stop 1 will try to stop the cluster along with that master.
</li>
<li>To stop an individual regionserver, run
<source>% bin/local-regionservers.sh stop 1
</source>
</li>
</ol>
</li>
</ol>
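<p>As a final sanity check (an assumed convenience, not required by the steps above), you can list
the running daemons and the pid files the scripts write under /tmp:</p>
<source>% jps | egrep 'HMaster|HRegionServer'
% ls /tmp/hbase-${USER}-*.pid</source>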
</body>
</document>