HBASE-18846 Accommodate the hbase-indexer/lily/SEP consumer deploy-type
Patch to start a standalone RegionServer that registers itself and optionally stands up Services. Can work w/o a Master in the mix. Useful for testing. Can also be used by hbase-indexer to put up a Replication sink that extends public-facing APIs w/o need to extend internals. See the JIRA release note for detail.

This patch adds booleans for whether to start the Admin and Client Services. Other refactoring moves all thread and service start into the one fat location so we can ask to bypass 'services' if we don't need them. See the JIRA for an example hbase-server.xml that has config to shut down WAL, cache, etc. Adds checks that a service/thread has been set up before going to use it. Renames the ExecutorService in HRegionServer from service to executorService. See the JIRA too for an example Connection implementation that makes use of the Connection plugin point to receive a replication stream. The default replication sink catches the incoming replication stream, undoes the WALEdits, and then creates a Table to call batch with the edits; up on the JIRA, an example Connection plugin (legit, supported) returns a Table with an overridden batch method wherein we do index inserts, returning appropriate results to keep the replication engine ticking over.

Upsides: an unadulterated RegionServer that keeps replication metrics and even hosts a web UI if wanted. No hacks. Just ordained configs shutting down unused services. Injection of the indexing function at a blessed point with no pollution by hbase internals; only public imports. No use of Private nor LimitedPrivate classes.
parent 37b29e909d
commit 456057ef90
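As a sketch of the Connection plugin point described in the commit message above (the class name and the reflective-proxy shortcut are assumptions made here for brevity, not the example attached to the JIRA), a custom Connection can hand back a Table whose batch() sees the replicated edits before anything is written:

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import org.apache.hadoop.hbase.client.Table;

/** Hypothetical helper: wraps a Table so batch() calls can be redirected to an indexer. */
public final class IndexingTableWrapper {
  public static Table wrap(final Table delegate) {
    return (Table) Proxy.newProxyInstance(
        Table.class.getClassLoader(),
        new Class<?>[] { Table.class },
        new InvocationHandler() {
          @Override
          public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
            if ("batch".equals(method.getName())) {
              // A real indexer would translate the incoming Mutations into index inserts here
              // and fill the results array so the replication sink sees every edit as applied.
            }
            // This sketch simply forwards to the wrapped Table (InvocationTargetException
            // unwrapping omitted for brevity).
            return method.invoke(delegate, args);
          }
        });
  }
}

The example on the JIRA implements Connection directly and returns its own Table; the proxy here is only to keep the sketch short while showing where the batch override sits relative to the default replication sink.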
@@ -1,4 +1,4 @@
/**
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file

@@ -99,7 +99,6 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.MergeTableRegionsProcedure;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerStateNode;
import org.apache.hadoop.hbase.master.balancer.BalancerChore;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;

@@ -472,66 +471,73 @@ public class HMaster extends HRegionServer implements MasterServices {
public HMaster(final Configuration conf, CoordinatedStateManager csm)
throws IOException, KeeperException {
super(conf, csm);
this.rsFatals = new MemoryBoundedLogMessageBuffer(
conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));
try {
this.rsFatals = new MemoryBoundedLogMessageBuffer(
conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));

LOG.info("hbase.rootdir=" + getRootDir() +
", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));
LOG.info("hbase.rootdir=" + getRootDir() +
", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));

// Disable usage of meta replicas in the master
this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);
// Disable usage of meta replicas in the master
this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);

Replication.decorateMasterConfiguration(this.conf);
Replication.decorateMasterConfiguration(this.conf);

// Hack! Maps DFSClient => Master for logs. HDFS made this
// config param for task trackers, but we can piggyback off of it.
if (this.conf.get("mapreduce.task.attempt.id") == null) {
this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
}

// should we check the compression codec type at master side, default true, HBASE-6370
this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);

// should we check encryption settings at master side, default true
this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);

this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));

// preload table descriptor at startup
this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);

this.maxBlancingTime = getMaxBalancingTime();
this.maxRitPercent = conf.getDouble(HConstants.HBASE_MASTER_BALANCER_MAX_RIT_PERCENT,
HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT);

// Do we publish the status?

boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
HConstants.STATUS_PUBLISHED_DEFAULT);
Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
ClusterStatusPublisher.Publisher.class);

if (shouldPublish) {
if (publisherClass == null) {
LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +
" is not set - not publishing status");
} else {
clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
getChoreService().scheduleChore(clusterStatusPublisherChore);
// Hack! Maps DFSClient => Master for logs. HDFS made this
// config param for task trackers, but we can piggyback off of it.
if (this.conf.get("mapreduce.task.attempt.id") == null) {
this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
}
}

// Some unit tests don't need a cluster, so no zookeeper at all
if (!conf.getBoolean("hbase.testing.nocluster", false)) {
setInitLatch(new CountDownLatch(1));
activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
int infoPort = putUpJettyServer();
startActiveMasterManager(infoPort);
} else {
activeMasterManager = null;
// should we check the compression codec type at master side, default true, HBASE-6370
this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);

// should we check encryption settings at master side, default true
this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);

this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));

// preload table descriptor at startup
this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);

this.maxBlancingTime = getMaxBalancingTime();
this.maxRitPercent = conf.getDouble(HConstants.HBASE_MASTER_BALANCER_MAX_RIT_PERCENT,
HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT);

// Do we publish the status?

boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
HConstants.STATUS_PUBLISHED_DEFAULT);
Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
ClusterStatusPublisher.Publisher.class);

if (shouldPublish) {
if (publisherClass == null) {
LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +
" is not set - not publishing status");
} else {
clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
getChoreService().scheduleChore(clusterStatusPublisherChore);
}
}

// Some unit tests don't need a cluster, so no zookeeper at all
if (!conf.getBoolean("hbase.testing.nocluster", false)) {
setInitLatch(new CountDownLatch(1));
activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
int infoPort = putUpJettyServer();
startActiveMasterManager(infoPort);
} else {
activeMasterManager = null;
}
} catch (Throwable t) {
// Make sure we log the exception. HMaster is often started via reflection and the
// cause of failed startup is lost.
LOG.error("Failed construction of Master", t);
throw t;
}
}

@@ -1100,15 +1106,15 @@ public class HMaster extends HRegionServer implements MasterServices {
*/
private void startServiceThreads() throws IOException{
// Start the executor service pools
this.service.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
conf.getInt("hbase.master.executor.openregion.threads", 5));
this.service.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
this.executorService.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
conf.getInt("hbase.master.executor.closeregion.threads", 5));
this.service.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
this.executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
conf.getInt("hbase.master.executor.serverops.threads", 5));
this.service.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
this.executorService.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
conf.getInt("hbase.master.executor.meta.serverops.threads", 5));
this.service.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
this.executorService.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
conf.getInt("hbase.master.executor.logreplayops.threads", 10));

// We depend on there being only one instance of this executor running

@@ -1116,7 +1122,7 @@ public class HMaster extends HRegionServer implements MasterServices {
// tables.
// Any time changing this maxThreads to > 1, pls see the comment at
// AccessController#postCompletedCreateTableAction
this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
this.executorService.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
startProcedureExecutor();

// Start log cleaner thread

File diff suppressed because it is too large
@@ -314,6 +314,16 @@ public class RSRpcServices implements HBaseRPCErrorHandler,

final AtomicBoolean clearCompactionQueues = new AtomicBoolean(false);

/**
* Services launched in RSRpcServices. By default they are on but you can use the below
* booleans to selectively enable/disable either Admin or Client Service (Rare is the case
* where you would ever turn off one or the other).
*/
public static final String REGIONSERVER_ADMIN_SERVICE_CONFIG =
"hbase.regionserver.admin.executorService";
public static final String REGIONSERVER_CLIENT_SERVICE_CONFIG =
"hbase.regionserver.client.executorService";

/**
* An Rpc callback for closing a RegionScanner.
*/
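For reference, a minimal sketch of flipping these two new keys from code (the class name is made up for this note, and which service a given deploy keeps is the operator's call; a replication-sink-only process would typically leave the Admin service up since that is the RPC surface the sink listens on):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SinkOnlyServices {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Both keys default to true; RSRpcServices#getServices() reads them at startup.
    conf.setBoolean("hbase.regionserver.client.executorService", false); // assumption: no client reads/writes served
    conf.setBoolean("hbase.regionserver.admin.executorService", true);   // keep the service that carries replication traffic
    System.out.println("client service on: "
        + conf.getBoolean("hbase.regionserver.client.executorService", true));
  }
}

The same booleans can equally go in the server's XML config; the example config on the JIRA shows the fuller set (WAL off, cache off, etc.) for a standalone sink.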
@@ -591,13 +601,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
/**
* Mutate a list of rows atomically.
*
* @param region
* @param actions
* @param cellScanner if non-null, the mutation data -- the Cell content.
* @param row
* @param family
* @param qualifier
* @param compareOp
* @param comparator @throws IOException
*/
private boolean checkAndRowMutate(final HRegion region, final List<ClientProtos.Action> actions,

@@ -649,12 +653,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
/**
* Execute an append mutation.
*
* @param region
* @param m
* @param cellScanner
* @return result to return to client if default operation should be
* bypassed as indicated by RegionObserver, null otherwise
* @throws IOException
*/
private Result append(final HRegion region, final OperationQuota quota,
final MutationProto mutation, final CellScanner cellScanner, long nonceGroup,

@@ -1426,17 +1426,31 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
}

/**
* @return list of blocking services and their security info classes that this server supports
* By default, put up an Admin and a Client Service.
* Set booleans <code>hbase.regionserver.admin.executorService</code> and
* <code>hbase.regionserver.client.executorService</code> if you want to enable/disable services.
* Default is that both are enabled.
* @return immutable list of blocking services and the security info classes that this server
* supports
*/
protected List<BlockingServiceAndInterface> getServices() {
List<BlockingServiceAndInterface> bssi = new ArrayList<>(2);
bssi.add(new BlockingServiceAndInterface(
boolean admin =
getConfiguration().getBoolean(REGIONSERVER_ADMIN_SERVICE_CONFIG, true);
boolean client =
getConfiguration().getBoolean(REGIONSERVER_CLIENT_SERVICE_CONFIG, true);
List<BlockingServiceAndInterface> bssi = new ArrayList<>();
if (client) {
bssi.add(new BlockingServiceAndInterface(
ClientService.newReflectiveBlockingService(this),
ClientService.BlockingInterface.class));
bssi.add(new BlockingServiceAndInterface(
}
if (admin) {
bssi.add(new BlockingServiceAndInterface(
AdminService.newReflectiveBlockingService(this),
AdminService.BlockingInterface.class));
return bssi;
}
return new org.apache.hadoop.hbase.shaded.com.google.common.collect.
ImmutableList.Builder<BlockingServiceAndInterface>().addAll(bssi).build();
}

public InetSocketAddress getSocketAddress() {

@@ -1943,20 +1957,24 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
}
// If there is no action in progress, we can submit a specific handler.
// Need to pass the expected version in the constructor.
if (region.isMetaRegion()) {
regionServer.service.submit(new OpenMetaHandler(
regionServer, regionServer, region, htd, masterSystemTime));
if (regionServer.executorService == null) {
LOG.info("No executor executorService; skipping open request");
} else {
if (regionOpenInfo.getFavoredNodesCount() > 0) {
regionServer.updateRegionFavoredNodesMapping(region.getEncodedName(),
regionOpenInfo.getFavoredNodesList());
}
if (htd.getPriority() >= HConstants.ADMIN_QOS || region.getTable().isSystemTable()) {
regionServer.service.submit(new OpenPriorityRegionHandler(
regionServer, regionServer, region, htd, masterSystemTime));
if (region.isMetaRegion()) {
regionServer.executorService.submit(new OpenMetaHandler(
regionServer, regionServer, region, htd, masterSystemTime));
} else {
regionServer.service.submit(new OpenRegionHandler(
if (regionOpenInfo.getFavoredNodesCount() > 0) {
regionServer.updateRegionFavoredNodesMapping(region.getEncodedName(),
regionOpenInfo.getFavoredNodesList());
}
if (htd.getPriority() >= HConstants.ADMIN_QOS || region.getTable().isSystemTable()) {
regionServer.executorService.submit(new OpenPriorityRegionHandler(
regionServer, regionServer, region, htd, masterSystemTime));
} else {
regionServer.executorService.submit(new OpenRegionHandler(
regionServer, regionServer, region, htd, masterSystemTime));
}
}
}
}

@@ -263,7 +263,7 @@ public class TestRegionServerNoMaster {
// Let's start the open handler
TableDescriptor htd = getRS().tableDescriptors.get(hri.getTable());

getRS().service.submit(new OpenRegionHandler(getRS(), getRS(), hri, htd, -1));
getRS().executorService.submit(new OpenRegionHandler(getRS(), getRS(), hri, htd, -1));

// The open handler should have removed the region from RIT but kept the region closed
checkRegionIsClosed(HTU, getRS(), hri);