HBASE-2977. Refactor master command line to a new class

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@995674 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 2010-09-10 05:48:39 +00:00
parent 9453a813d0
commit 94b78b4997
4 changed files with 236 additions and 152 deletions


@@ -901,6 +901,7 @@ Release 0.21.0 - Unreleased
HBaseObjectWritable (Gary Helmling via Andrew Purtell)
HBASE-2976 Running HFile tool passing fully-qualified filename I get
'IllegalArgumentException: Wrong FS'
HBASE-2977 Refactor master command line to a new class
NEW FEATURES
HBASE-1961 HBase EC2 scripts


@@ -21,8 +21,6 @@ package org.apache.hadoop.hbase.master;
import java.io.File;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.UnknownHostException;
@@ -31,13 +29,10 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -775,15 +770,6 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
return status;
}
private static void printUsageAndExit() {
System.err.println("Usage: Master [opts] start|stop");
System.err.println(" start Start Master. If local mode, start Master and RegionServer in same JVM");
System.err.println(" stop Start cluster shutdown; Master signals RegionServer shutdown");
System.err.println(" where [opts] are:");
System.err.println(" --minServers=<servers> Minimum RegionServers needed to host user tables.");
System.exit(0);
}
@Override
public void abort(final String msg, final Throwable t) {
if (t != null) LOG.fatal(msg, t);
@@ -866,143 +852,16 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
}
}
/*
* Version of master that will shutdown the passed zk cluster on its way out.
*/
static class LocalHMaster extends HMaster {
private MiniZooKeeperCluster zkcluster = null;
public LocalHMaster(Configuration conf)
throws IOException, KeeperException, InterruptedException {
super(conf);
}
@Override
public void run() {
super.run();
if (this.zkcluster != null) {
try {
this.zkcluster.shutdown();
} catch (IOException e) {
e.printStackTrace();
}
}
}
void setZKCluster(final MiniZooKeeperCluster zkcluster) {
this.zkcluster = zkcluster;
}
}
protected static void doMain(String [] args,
Class<? extends HMaster> masterClass) {
Configuration conf = HBaseConfiguration.create();
Options opt = new Options();
opt.addOption("minServers", true, "Minimum RegionServers needed to host user tables");
opt.addOption("D", true, "Override HBase Configuration Settings");
opt.addOption("backup", false, "Do not try to become HMaster until the primary fails");
try {
CommandLine cmd = new GnuParser().parse(opt, args);
if (cmd.hasOption("minServers")) {
String val = cmd.getOptionValue("minServers");
conf.setInt("hbase.regions.server.count.min",
Integer.valueOf(val));
LOG.debug("minServers set to " + val);
}
if (cmd.hasOption("D")) {
for (String confOpt : cmd.getOptionValues("D")) {
String[] kv = confOpt.split("=", 2);
if (kv.length == 2) {
conf.set(kv[0], kv[1]);
LOG.debug("-D configuration override: " + kv[0] + "=" + kv[1]);
} else {
throw new ParseException("-D option format invalid: " + confOpt);
}
}
}
// check if we are the backup master - override the conf if so
if (cmd.hasOption("backup")) {
conf.setBoolean(HConstants.MASTER_TYPE_BACKUP, true);
}
if (cmd.getArgList().contains("start")) {
try {
// Print out vm stats before starting up.
RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
if (runtime != null) {
LOG.info("vmName=" + runtime.getVmName() + ", vmVendor=" +
runtime.getVmVendor() + ", vmVersion=" + runtime.getVmVersion());
LOG.info("vmInputArguments=" + runtime.getInputArguments());
}
// If 'local', defer to LocalHBaseCluster instance. Starts master
// and regionserver both in the one JVM.
if (LocalHBaseCluster.isLocal(conf)) {
final MiniZooKeeperCluster zooKeeperCluster =
new MiniZooKeeperCluster();
File zkDataPath = new File(conf.get("hbase.zookeeper.property.dataDir"));
int zkClientPort = conf.getInt("hbase.zookeeper.property.clientPort", 0);
if (zkClientPort == 0) {
throw new IOException("No config value for hbase.zookeeper.property.clientPort");
}
zooKeeperCluster.setTickTime(conf.getInt("hbase.zookeeper.property.tickTime", 3000));
zooKeeperCluster.setClientPort(zkClientPort);
int clientPort = zooKeeperCluster.startup(zkDataPath);
if (clientPort != zkClientPort) {
String errorMsg = "Couldnt start ZK at requested address of " +
zkClientPort + ", instead got: " + clientPort + ". Aborting. Why? " +
"Because clients (eg shell) wont be able to find this ZK quorum";
System.err.println(errorMsg);
throw new IOException(errorMsg);
}
conf.set("hbase.zookeeper.property.clientPort",
Integer.toString(clientPort));
// Need to have the zk cluster shutdown when master is shutdown.
// Run a subclass that does the zk cluster shutdown on its way out.
LocalHBaseCluster cluster = new LocalHBaseCluster(conf, 1,
LocalHMaster.class, HRegionServer.class);
((LocalHMaster)cluster.getMaster()).setZKCluster(zooKeeperCluster);
cluster.startup();
} else {
HMaster master = constructMaster(masterClass, conf);
if (master.isStopped()) {
LOG.info("Won't bring the Master up as a shutdown is requested");
return;
}
master.start();
}
} catch (Throwable t) {
LOG.error("Failed to start master", t);
System.exit(-1);
}
} else if (cmd.getArgList().contains("stop")) {
HBaseAdmin adm = null;
try {
adm = new HBaseAdmin(conf);
} catch (MasterNotRunningException e) {
LOG.error("Master not running");
System.exit(0);
} catch (ZooKeeperConnectionException e) {
LOG.error("ZooKeeper not available");
System.exit(0);
}
try {
adm.shutdown();
} catch (Throwable t) {
LOG.error("Failed to stop master", t);
System.exit(-1);
}
} else {
throw new ParseException("Unknown argument(s): " +
org.apache.commons.lang.StringUtils.join(cmd.getArgs(), " "));
}
} catch (ParseException e) {
LOG.error("Could not parse: ", e);
printUsageAndExit();
Class<? extends HMaster> masterClass) throws Exception {
int ret = ToolRunner.run(
HBaseConfiguration.create(),
new HMasterCommandLine(masterClass),
args);
if (ret != 0) {
System.exit(ret);
}
// Otherwise exit gracefully so other threads clean up
}
/**
@@ -1010,7 +869,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
* @param args
* @throws IOException
*/
public static void main(String [] args) throws IOException {
public static void main(String [] args) throws Exception {
doMain(args, HMaster.class);
}
}
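The refactored doMain() above hands argument handling to Hadoop's ToolRunner, which runs GenericOptionsParser over the arguments (applying -D key=value overrides to the Configuration) before invoking the Tool's run() method, and whose return value doMain() turns into the process exit code. The sketch below is a minimal, hypothetical illustration of that Tool/ToolRunner pattern; the class name EchoTool is invented for this example and is not part of the commit.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical illustration of the Tool/ToolRunner pattern used by the new doMain().
public class EchoTool extends Configured implements Tool {
  @Override
  public int run(String[] args) throws Exception {
    // getConf() returns the Configuration that ToolRunner populated,
    // including any -D key=value overrides parsed from the command line.
    System.out.println("fs.default.name = " + getConf().get("fs.default.name"));
    System.out.println("remaining args  = " + Arrays.toString(args));
    return args.length == 0 ? 1 : 0; // a non-zero return becomes the exit code
  }

  public static void main(String[] args) throws Exception {
    int ret = ToolRunner.run(new Configuration(), new EchoTool(), args);
    System.exit(ret);
  }
}

Because GenericOptionsParser already consumes -D overrides, the hand-rolled "-D" option from the old doMain() has no counterpart in HMasterCommandLine. Note also that on success doMain() deliberately skips System.exit(0) so that, per its "exit gracefully" comment, the other master threads can keep running and clean up on their own.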


@@ -0,0 +1,224 @@
/**
* Copyright 2010 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import java.io.IOException;
import java.io.File;
import java.lang.management.RuntimeMXBean;
import java.lang.management.ManagementFactory;
import java.util.List;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.LocalHBaseCluster;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.zookeeper.KeeperException;
public class HMasterCommandLine extends Configured implements Tool {
private static final Log LOG = LogFactory.getLog(HMasterCommandLine.class);
private static final String USAGE =
"Usage: Master [opts] start|stop\n" +
" start Start Master. If local mode, start Master and RegionServer in same JVM\n" +
" stop Start cluster shutdown; Master signals RegionServer shutdown\n" +
" where [opts] are:\n" +
" --minServers=<servers> Minimum RegionServers needed to host user tables.\n" +
" --backup Master should start in backup mode";
private final Class<? extends HMaster> masterClass;
public HMasterCommandLine(Class<? extends HMaster> masterClass) {
this.masterClass = masterClass;
}
private void usage(String message) {
if (message != null) {
System.err.println(message);
System.err.println("\n");
}
System.err.println(USAGE);
}
private static void logJVMInfo() {
// Print out vm stats before starting up.
RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
if (runtime != null) {
LOG.info("vmName=" + runtime.getVmName() + ", vmVendor=" +
runtime.getVmVendor() + ", vmVersion=" + runtime.getVmVersion());
LOG.info("vmInputArguments=" + runtime.getInputArguments());
}
}
public int run(String args[]) throws Exception {
Options opt = new Options();
opt.addOption("minServers", true, "Minimum RegionServers needed to host user tables");
opt.addOption("backup", false, "Do not try to become HMaster until the primary fails");
CommandLine cmd;
try {
cmd = new GnuParser().parse(opt, args);
} catch (ParseException e) {
LOG.error("Could not parse: ", e);
usage(null);
return -1;
}
if (cmd.hasOption("minServers")) {
String val = cmd.getOptionValue("minServers");
getConf().setInt("hbase.regions.server.count.min",
Integer.valueOf(val));
LOG.debug("minServers set to " + val);
}
// check if we are the backup master - override the conf if so
if (cmd.hasOption("backup")) {
getConf().setBoolean(HConstants.MASTER_TYPE_BACKUP, true);
}
List<String> remainingArgs = cmd.getArgList();
if (remainingArgs.size() != 1) {
usage(null);
return -1;
}
String command = remainingArgs.get(0);
if ("start".equals(command)) {
return startMaster();
} else if ("stop".equals(command)) {
return stopMaster();
} else {
usage("Invalid command: " + command);
return -1;
}
}
private int startMaster() {
Configuration conf = getConf();
try {
// If 'local', defer to LocalHBaseCluster instance. Starts master
// and regionserver both in the one JVM.
if (LocalHBaseCluster.isLocal(conf)) {
final MiniZooKeeperCluster zooKeeperCluster =
new MiniZooKeeperCluster();
File zkDataPath = new File(conf.get("hbase.zookeeper.property.dataDir"));
int zkClientPort = conf.getInt("hbase.zookeeper.property.clientPort", 0);
if (zkClientPort == 0) {
throw new IOException("No config value for hbase.zookeeper.property.clientPort");
}
zooKeeperCluster.setTickTime(conf.getInt("hbase.zookeeper.property.tickTime", 3000));
zooKeeperCluster.setClientPort(zkClientPort);
int clientPort = zooKeeperCluster.startup(zkDataPath);
if (clientPort != zkClientPort) {
String errorMsg = "Couldnt start ZK at requested address of " +
zkClientPort + ", instead got: " + clientPort + ". Aborting. Why? " +
"Because clients (eg shell) wont be able to find this ZK quorum";
System.err.println(errorMsg);
throw new IOException(errorMsg);
}
conf.set("hbase.zookeeper.property.clientPort",
Integer.toString(clientPort));
// Need to have the zk cluster shutdown when master is shutdown.
// Run a subclass that does the zk cluster shutdown on its way out.
LocalHBaseCluster cluster = new LocalHBaseCluster(conf, 1,
LocalHMaster.class, HRegionServer.class);
((LocalHMaster)cluster.getMaster()).setZKCluster(zooKeeperCluster);
cluster.startup();
} else {
HMaster master = HMaster.constructMaster(masterClass, conf);
if (master.isStopped()) {
LOG.info("Won't bring the Master up as a shutdown is requested");
return -1;
}
master.start();
master.join();
}
} catch (Throwable t) {
LOG.error("Failed to start master", t);
return -1;
}
return 0;
}
private int stopMaster() {
HBaseAdmin adm = null;
try {
adm = new HBaseAdmin(getConf());
} catch (MasterNotRunningException e) {
LOG.error("Master not running");
return -1;
} catch (ZooKeeperConnectionException e) {
LOG.error("ZooKeeper not available");
return -1;
}
try {
adm.shutdown();
} catch (Throwable t) {
LOG.error("Failed to stop master", t);
return -1;
}
return 0;
}
/*
* Version of master that will shutdown the passed zk cluster on its way out.
*/
static class LocalHMaster extends HMaster {
private MiniZooKeeperCluster zkcluster = null;
public LocalHMaster(Configuration conf)
throws IOException, KeeperException, InterruptedException {
super(conf);
}
@Override
public void run() {
super.run();
if (this.zkcluster != null) {
try {
this.zkcluster.shutdown();
} catch (IOException e) {
e.printStackTrace();
}
}
}
void setZKCluster(final MiniZooKeeperCluster zkcluster) {
this.zkcluster = zkcluster;
}
}
}
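For reference, here is a hedged sketch of how the new class can be driven directly, mirroring the updated HMaster.doMain() shown above; the driver class name is invented for this example.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.HMasterCommandLine;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical standalone driver; in the commit itself this wiring lives in
// HMaster.doMain(), which OOMEHMaster also reuses with its own master class.
public class MasterCommandLineDriver {
  public static void main(String[] args) throws Exception {
    // "start" brings up the master (plus a RegionServer and a MiniZooKeeperCluster
    // in local mode); "stop" connects via HBaseAdmin and requests cluster shutdown.
    int exitCode = ToolRunner.run(
        HBaseConfiguration.create(),
        new HMasterCommandLine(HMaster.class),
        new String[] { "start" });
    if (exitCode != 0) {
      System.exit(exitCode);
    }
  }
}

Passing the concrete HMaster subclass into the constructor is what lets subclasses such as OOMEHMaster reuse the exact same command-line handling.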


@@ -56,7 +56,7 @@ public class OOMEHMaster extends HMaster {
* @param args
* @throws IOException
*/
public static void main(String[] args) throws IOException {
public static void main(String[] args) throws Exception {
doMain(args, OOMEHMaster.class);
}
}