diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java new file mode 100644 index 00000000000..4bd0b3304d4 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseRpcServicesBase.java @@ -0,0 +1,386 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.net.BindException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.ThreadLocalRandom; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.client.ConnectionUtils; +import org.apache.hadoop.hbase.conf.ConfigurationObserver; +import org.apache.hadoop.hbase.io.ByteBuffAllocator; +import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler; +import org.apache.hadoop.hbase.ipc.PriorityFunction; +import org.apache.hadoop.hbase.ipc.QosPriority; +import org.apache.hadoop.hbase.ipc.RpcScheduler; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; +import org.apache.hadoop.hbase.ipc.RpcServerFactory; +import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.namequeues.NamedQueuePayload; +import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; +import org.apache.hadoop.hbase.namequeues.RpcLogDetails; +import org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest; +import org.apache.hadoop.hbase.namequeues.response.NamedQueueGetResponse; +import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.access.AccessChecker; +import org.apache.hadoop.hbase.security.access.NoopAccessChecker; +import org.apache.hadoop.hbase.security.access.Permission; +import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher; +import org.apache.hadoop.hbase.util.DNS; +import org.apache.hadoop.hbase.util.OOMEChecker; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.zookeeper.KeeperException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; +import org.apache.hbase.thirdparty.com.google.protobuf.Message; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; +import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + +import 
org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearSlowLogResponseRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearSlowLogResponses; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SlowLogResponseRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SlowLogResponses; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.ClientMetaService; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetActiveMasterRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetActiveMasterResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetBootstrapNodesRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetBootstrapNodesResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetClusterIdRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetClusterIdResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMastersRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMastersResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMastersResponseEntry; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaRegionLocationsRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaRegionLocationsResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog.SlowLogPayload; + +/** + * Base class for Master and RegionServer RpcServices. + */ +@InterfaceAudience.Private +public abstract class HBaseRpcServicesBase> + implements ClientMetaService.BlockingInterface, AdminService.BlockingInterface, + HBaseRPCErrorHandler, PriorityFunction, ConfigurationObserver { + + private static final Logger LOG = LoggerFactory.getLogger(HBaseRpcServicesBase.class); + + public static final String CLIENT_BOOTSTRAP_NODE_LIMIT = "hbase.client.bootstrap.node.limit"; + + public static final int DEFAULT_CLIENT_BOOTSTRAP_NODE_LIMIT = 10; + + protected final S server; + + // Server to handle client requests. 
+ protected final RpcServer rpcServer; + + private final InetSocketAddress isa; + + protected final PriorityFunction priority; + + private AccessChecker accessChecker; + + private ZKPermissionWatcher zkPermissionWatcher; + + protected HBaseRpcServicesBase(S server, String processName) throws IOException { + this.server = server; + Configuration conf = server.getConfiguration(); + final RpcSchedulerFactory rpcSchedulerFactory; + try { + rpcSchedulerFactory = getRpcSchedulerFactoryClass(conf).asSubclass(RpcSchedulerFactory.class) + .getDeclaredConstructor().newInstance(); + } catch (NoSuchMethodException | InvocationTargetException | InstantiationException + | IllegalAccessException e) { + throw new IllegalArgumentException(e); + } + String hostname = DNS.getHostname(conf, getDNSServerType()); + int port = conf.getInt(getPortConfigName(), getDefaultPort()); + // Creation of a HSA will force a resolve. + final InetSocketAddress initialIsa = new InetSocketAddress(hostname, port); + final InetSocketAddress bindAddress = new InetSocketAddress(getHostname(conf, hostname), port); + if (initialIsa.getAddress() == null) { + throw new IllegalArgumentException("Failed resolve of " + initialIsa); + } + priority = createPriority(); + // Using Address means we don't get the IP too. Shorten it more even to just the host name + // w/o the domain. + final String name = processName + "/" + + Address.fromParts(initialIsa.getHostName(), initialIsa.getPort()).toStringWithoutDomain(); + server.setName(name); + // Set how many times to retry talking to another server over Connection. + ConnectionUtils.setServerSideHConnectionRetriesConfig(conf, name, LOG); + boolean reservoirEnabled = + conf.getBoolean(ByteBuffAllocator.ALLOCATOR_POOL_ENABLED_KEY, defaultReservoirEnabled()); + try { + // use final bindAddress for this server. + rpcServer = RpcServerFactory.createRpcServer(server, name, getServices(), bindAddress, conf, + rpcSchedulerFactory.create(conf, this, server), reservoirEnabled); + } catch (BindException be) { + throw new IOException(be.getMessage() + ". To switch ports use the '" + getPortConfigName() + + "' configuration property.", be.getCause() != null ? 
be.getCause() : be); + } + final InetSocketAddress address = rpcServer.getListenerAddress(); + if (address == null) { + throw new IOException("Listener channel is closed"); + } + // Set our address, however we need the final port that was given to rpcServer + isa = new InetSocketAddress(initialIsa.getHostName(), address.getPort()); + rpcServer.setErrorHandler(this); + } + + protected abstract boolean defaultReservoirEnabled(); + + protected abstract DNS.ServerType getDNSServerType(); + + protected abstract String getHostname(Configuration conf, String defaultHostname); + + protected abstract String getPortConfigName(); + + protected abstract int getDefaultPort(); + + protected abstract PriorityFunction createPriority(); + + protected abstract Class getRpcSchedulerFactoryClass(Configuration conf); + + protected abstract List getServices(); + + protected final void internalStart(ZKWatcher zkWatcher) { + if (AccessChecker.isAuthorizationSupported(getConfiguration())) { + accessChecker = new AccessChecker(getConfiguration()); + } else { + accessChecker = new NoopAccessChecker(getConfiguration()); + } + zkPermissionWatcher = + new ZKPermissionWatcher(zkWatcher, accessChecker.getAuthManager(), getConfiguration()); + try { + zkPermissionWatcher.start(); + } catch (KeeperException e) { + LOG.error("ZooKeeper permission watcher initialization failed", e); + } + rpcServer.start(); + } + + protected final void requirePermission(String request, Permission.Action perm) + throws IOException { + if (accessChecker != null) { + accessChecker.requirePermission(RpcServer.getRequestUser().orElse(null), request, null, perm); + } + } + + public AccessChecker getAccessChecker() { + return accessChecker; + } + + public ZKPermissionWatcher getZkPermissionWatcher() { + return zkPermissionWatcher; + } + + protected final void internalStop() { + if (zkPermissionWatcher != null) { + zkPermissionWatcher.close(); + } + rpcServer.stop(); + } + + public Configuration getConfiguration() { + return server.getConfiguration(); + } + + public S getServer() { + return server; + } + + public InetSocketAddress getSocketAddress() { + return isa; + } + + public RpcServerInterface getRpcServer() { + return rpcServer; + } + + public RpcScheduler getRpcScheduler() { + return rpcServer.getScheduler(); + } + + @Override + public int getPriority(RequestHeader header, Message param, User user) { + return priority.getPriority(header, param, user); + } + + @Override + public long getDeadline(RequestHeader header, Message param) { + return priority.getDeadline(header, param); + } + + /** + * Check if an OOME and, if so, abort immediately to avoid creating more objects. + * @return True if we OOME'd and are aborting. 
+ */ + @Override + public boolean checkOOME(Throwable e) { + return OOMEChecker.exitIfOOME(e, getClass().getSimpleName()); + } + + @Override + public void onConfigurationChange(Configuration conf) { + rpcServer.onConfigurationChange(conf); + } + + @Override + public GetClusterIdResponse getClusterId(RpcController controller, GetClusterIdRequest request) + throws ServiceException { + return GetClusterIdResponse.newBuilder().setClusterId(server.getClusterId()).build(); + } + + @Override + public GetActiveMasterResponse getActiveMaster(RpcController controller, + GetActiveMasterRequest request) throws ServiceException { + GetActiveMasterResponse.Builder builder = GetActiveMasterResponse.newBuilder(); + server.getActiveMaster() + .ifPresent(name -> builder.setServerName(ProtobufUtil.toServerName(name))); + return builder.build(); + } + + @Override + public GetMastersResponse getMasters(RpcController controller, GetMastersRequest request) + throws ServiceException { + GetMastersResponse.Builder builder = GetMastersResponse.newBuilder(); + server.getActiveMaster() + .ifPresent(activeMaster -> builder.addMasterServers(GetMastersResponseEntry.newBuilder() + .setServerName(ProtobufUtil.toServerName(activeMaster)).setIsActive(true))); + server.getBackupMasters() + .forEach(backupMaster -> builder.addMasterServers(GetMastersResponseEntry.newBuilder() + .setServerName(ProtobufUtil.toServerName(backupMaster)).setIsActive(false))); + return builder.build(); + } + + @Override + public GetMetaRegionLocationsResponse getMetaRegionLocations(RpcController controller, + GetMetaRegionLocationsRequest request) throws ServiceException { + GetMetaRegionLocationsResponse.Builder builder = GetMetaRegionLocationsResponse.newBuilder(); + server.getMetaLocations() + .forEach(location -> builder.addMetaLocations(ProtobufUtil.toRegionLocation(location))); + return builder.build(); + } + + @Override + public final GetBootstrapNodesResponse getBootstrapNodes(RpcController controller, + GetBootstrapNodesRequest request) throws ServiceException { + List bootstrapNodes = new ArrayList<>(server.getRegionServers()); + Collections.shuffle(bootstrapNodes, ThreadLocalRandom.current()); + int maxNodeCount = server.getConfiguration().getInt(CLIENT_BOOTSTRAP_NODE_LIMIT, + DEFAULT_CLIENT_BOOTSTRAP_NODE_LIMIT); + GetBootstrapNodesResponse.Builder builder = GetBootstrapNodesResponse.newBuilder(); + bootstrapNodes.stream().limit(maxNodeCount).map(ProtobufUtil::toServerName) + .forEach(builder::addServerName); + return builder.build(); + } + + @Override + public UpdateConfigurationResponse updateConfiguration(RpcController controller, + UpdateConfigurationRequest request) throws ServiceException { + try { + requirePermission("updateConfiguration", Permission.Action.ADMIN); + this.server.updateConfiguration(); + } catch (Exception e) { + throw new ServiceException(e); + } + return UpdateConfigurationResponse.getDefaultInstance(); + } + + @Override + @QosPriority(priority = HConstants.ADMIN_QOS) + public ClearSlowLogResponses clearSlowLogsResponses(final RpcController controller, + final ClearSlowLogResponseRequest request) throws ServiceException { + try { + requirePermission("clearSlowLogsResponses", Permission.Action.ADMIN); + } catch (IOException e) { + throw new ServiceException(e); + } + final NamedQueueRecorder namedQueueRecorder = this.server.getNamedQueueRecorder(); + boolean slowLogsCleaned = Optional.ofNullable(namedQueueRecorder) + .map( + queueRecorder -> 
queueRecorder.clearNamedQueue(NamedQueuePayload.NamedQueueEvent.SLOW_LOG)) + .orElse(false); + ClearSlowLogResponses clearSlowLogResponses = + ClearSlowLogResponses.newBuilder().setIsCleaned(slowLogsCleaned).build(); + return clearSlowLogResponses; + } + + private List getSlowLogPayloads(SlowLogResponseRequest request, + NamedQueueRecorder namedQueueRecorder) { + if (namedQueueRecorder == null) { + return Collections.emptyList(); + } + List slowLogPayloads; + NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest(); + namedQueueGetRequest.setNamedQueueEvent(RpcLogDetails.SLOW_LOG_EVENT); + namedQueueGetRequest.setSlowLogResponseRequest(request); + NamedQueueGetResponse namedQueueGetResponse = + namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest); + slowLogPayloads = namedQueueGetResponse != null ? namedQueueGetResponse.getSlowLogPayloads() : + Collections.emptyList(); + return slowLogPayloads; + } + + @Override + @QosPriority(priority = HConstants.ADMIN_QOS) + public HBaseProtos.LogEntry getLogEntries(RpcController controller, + HBaseProtos.LogRequest request) throws ServiceException { + try { + final String logClassName = request.getLogClassName(); + Class logClass = Class.forName(logClassName).asSubclass(Message.class); + Method method = logClass.getMethod("parseFrom", ByteString.class); + if (logClassName.contains("SlowLogResponseRequest")) { + SlowLogResponseRequest slowLogResponseRequest = + (SlowLogResponseRequest) method.invoke(null, request.getLogMessage()); + final NamedQueueRecorder namedQueueRecorder = this.server.getNamedQueueRecorder(); + final List slowLogPayloads = + getSlowLogPayloads(slowLogResponseRequest, namedQueueRecorder); + SlowLogResponses slowLogResponses = + SlowLogResponses.newBuilder().addAllSlowLogPayloads(slowLogPayloads).build(); + return HBaseProtos.LogEntry.newBuilder() + .setLogClassName(slowLogResponses.getClass().getName()) + .setLogMessage(slowLogResponses.toByteString()).build(); + } + } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException + | InvocationTargetException e) { + LOG.error("Error while retrieving log entries.", e); + throw new ServiceException(e); + } + throw new ServiceException("Invalid request params"); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java new file mode 100644 index 00000000000..316f1b6a7b2 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java @@ -0,0 +1,600 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase; + +import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK; +import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; + +import com.google.errorprone.annotations.RestrictedApi; +import java.io.IOException; +import java.lang.management.MemoryType; +import java.net.BindException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.util.concurrent.atomic.AtomicBoolean; +import javax.servlet.http.HttpServlet; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.SystemUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.client.AsyncClusterConnection; +import org.apache.hadoop.hbase.client.ClusterConnectionFactory; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.ConnectionRegistryEndpoint; +import org.apache.hadoop.hbase.conf.ConfigurationManager; +import org.apache.hadoop.hbase.conf.ConfigurationObserver; +import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager; +import org.apache.hadoop.hbase.executor.ExecutorService; +import org.apache.hadoop.hbase.fs.HFileSystem; +import org.apache.hadoop.hbase.http.InfoServer; +import org.apache.hadoop.hbase.io.util.MemorySizeUtil; +import org.apache.hadoop.hbase.ipc.RpcServerInterface; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; +import org.apache.hadoop.hbase.regionserver.ChunkCreator; +import org.apache.hadoop.hbase.regionserver.HeapMemoryManager; +import org.apache.hadoop.hbase.regionserver.MemStoreLAB; +import org.apache.hadoop.hbase.security.Superusers; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.UserProvider; +import org.apache.hadoop.hbase.security.access.AccessChecker; +import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher; +import org.apache.hadoop.hbase.util.Addressing; +import org.apache.hadoop.hbase.util.CommonFSUtils; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSTableDescriptors; +import org.apache.hadoop.hbase.util.NettyEventLoopGroupConfig; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.Sleeper; +import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import sun.misc.Signal; + +/** + * Base class for hbase services, such as master or region server. + */ +@InterfaceAudience.Private +public abstract class HBaseServerBase> extends Thread + implements Server, ConfigurationObserver, ConnectionRegistryEndpoint { + + private static final Logger LOG = LoggerFactory.getLogger(HBaseServerBase.class); + + protected final Configuration conf; + + // Go down hard. Used if file system becomes unavailable and also in + // debugging and unit tests. + protected final AtomicBoolean abortRequested = new AtomicBoolean(false); + + // Set when a report to the master comes back with a message asking us to + // shutdown. Also set by call to stop when debugging or running unit tests + // of HRegionServer in isolation. 
+ protected volatile boolean stopped = false; + + /** + * This servers startcode. + */ + protected final long startcode; + + protected final UserProvider userProvider; + + // zookeeper connection and watcher + protected final ZKWatcher zooKeeper; + + /** + * The server name the Master sees us as. Its made from the hostname the master passes us, port, + * and server startcode. Gets set after registration against Master. + */ + protected ServerName serverName; + + protected final R rpcServices; + + /** + * hostname specified by hostname config + */ + protected final String useThisHostnameInstead; + + /** + * Provide online slow log responses from ringbuffer + */ + protected final NamedQueueRecorder namedQueueRecorder; + + /** + * Configuration manager is used to register/deregister and notify the configuration observers + * when the regionserver is notified that there was a change in the on disk configs. + */ + protected final ConfigurationManager configurationManager; + + /** + * ChoreService used to schedule tasks that we want to run periodically + */ + protected final ChoreService choreService; + + // Instance of the hbase executor executorService. + protected final ExecutorService executorService; + + // Cluster Status Tracker + protected final ClusterStatusTracker clusterStatusTracker; + + protected final CoordinatedStateManager csm; + + // Info server. Default access so can be used by unit tests. REGIONSERVER + // is name of the webapp and the attribute name used stuffing this instance + // into web context. + protected InfoServer infoServer; + + protected HFileSystem dataFs; + + protected HFileSystem walFs; + + protected Path dataRootDir; + + protected Path walRootDir; + + protected final int msgInterval; + + // A sleeper that sleeps for msgInterval. + protected final Sleeper sleeper; + + /** + * Go here to get table descriptors. + */ + protected TableDescriptors tableDescriptors; + + /** + * The asynchronous cluster connection to be shared by services. + */ + protected AsyncClusterConnection asyncClusterConnection; + + /** + * Cache for the meta region replica's locations. Also tracks their changes to avoid stale cache + * entries. Used for serving ClientMetaService. + */ + protected final MetaRegionLocationCache metaRegionLocationCache; + + protected final NettyEventLoopGroupConfig eventLoopGroupConfig; + + /** + * If running on Windows, do windows-specific setup. + */ + private static void setupWindows(final Configuration conf, ConfigurationManager cm) { + if (!SystemUtils.IS_OS_WINDOWS) { + Signal.handle(new Signal("HUP"), signal -> { + conf.reloadConfiguration(); + cm.notifyAllObservers(conf); + }); + } + } + + /** + * Setup our cluster connection if not already initialized. + */ + protected final synchronized void setupClusterConnection() throws IOException { + if (asyncClusterConnection == null) { + InetSocketAddress localAddress = + new InetSocketAddress(rpcServices.getSocketAddress().getAddress(), 0); + User user = userProvider.getCurrent(); + asyncClusterConnection = + ClusterConnectionFactory.createAsyncClusterConnection(this, conf, localAddress, user); + } + } + + protected final void initializeFileSystem() throws IOException { + // Get fs instance used by this RS. Do we use checksum verification in the hbase? If hbase + // checksum verification enabled, then automatically switch off hdfs checksum verification. 
+ boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true); + String walDirUri = CommonFSUtils.getDirUri(this.conf, + new Path(conf.get(CommonFSUtils.HBASE_WAL_DIR, conf.get(HConstants.HBASE_DIR)))); + // set WAL's uri + if (walDirUri != null) { + CommonFSUtils.setFsDefault(this.conf, walDirUri); + } + // init the WALFs + this.walFs = new HFileSystem(this.conf, useHBaseChecksum); + this.walRootDir = CommonFSUtils.getWALRootDir(this.conf); + // Set 'fs.defaultFS' to match the filesystem on hbase.rootdir else + // underlying hadoop hdfs accessors will be going against wrong filesystem + // (unless all is set to defaults). + String rootDirUri = + CommonFSUtils.getDirUri(this.conf, new Path(conf.get(HConstants.HBASE_DIR))); + if (rootDirUri != null) { + CommonFSUtils.setFsDefault(this.conf, rootDirUri); + } + // init the filesystem + this.dataFs = new HFileSystem(this.conf, useHBaseChecksum); + this.dataRootDir = CommonFSUtils.getRootDir(this.conf); + this.tableDescriptors = new FSTableDescriptors(this.dataFs, this.dataRootDir, + !canUpdateTableDescriptor(), cacheTableDescriptor()); + } + + public HBaseServerBase(Configuration conf, String name) + throws ZooKeeperConnectionException, IOException { + super(name); // thread name + this.conf = conf; + this.eventLoopGroupConfig = + NettyEventLoopGroupConfig.setup(conf, getClass().getSimpleName() + "-EventLoopGroup"); + this.startcode = EnvironmentEdgeManager.currentTime(); + this.userProvider = UserProvider.instantiate(conf); + this.msgInterval = conf.getInt("hbase.regionserver.msginterval", 3 * 1000); + this.sleeper = new Sleeper(this.msgInterval, this); + this.namedQueueRecorder = createNamedQueueRecord(); + this.rpcServices = createRpcServices(); + useThisHostnameInstead = getUseThisHostnameInstead(conf); + InetSocketAddress addr = rpcServices.getSocketAddress(); + String hostName = StringUtils.isBlank(useThisHostnameInstead) ? addr.getHostName() : + this.useThisHostnameInstead; + serverName = ServerName.valueOf(hostName, addr.getPort(), this.startcode); + // login the zookeeper client principal (if using security) + ZKUtil.loginClient(this.conf, HConstants.ZK_CLIENT_KEYTAB_FILE, + HConstants.ZK_CLIENT_KERBEROS_PRINCIPAL, hostName); + // login the server principal (if using secure Hadoop) + login(userProvider, hostName); + // init superusers and add the server principal (if using security) + // or process owner as default super user. + Superusers.initialize(conf); + zooKeeper = + new ZKWatcher(conf, getProcessName() + ":" + addr.getPort(), this, canCreateBaseZNode()); + + this.configurationManager = new ConfigurationManager(); + setupWindows(conf, configurationManager); + + initializeFileSystem(); + + this.choreService = new ChoreService(getName(), true); + this.executorService = new ExecutorService(getName()); + + this.metaRegionLocationCache = new MetaRegionLocationCache(zooKeeper); + + if (clusterMode()) { + if (conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, + DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) { + csm = new ZkCoordinatedStateManager(this); + } else { + csm = null; + } + clusterStatusTracker = new ClusterStatusTracker(zooKeeper, this); + clusterStatusTracker.start(); + } else { + csm = null; + clusterStatusTracker = null; + } + putUpWebUI(); + } + + /** + * Puts up the webui. 
+ */ + private void putUpWebUI() throws IOException { + int port = + this.conf.getInt(HConstants.REGIONSERVER_INFO_PORT, HConstants.DEFAULT_REGIONSERVER_INFOPORT); + String addr = this.conf.get("hbase.regionserver.info.bindAddress", "0.0.0.0"); + + if (this instanceof HMaster) { + port = conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT); + addr = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0"); + } + // -1 is for disabling info server + if (port < 0) { + return; + } + + if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) { + String msg = "Failed to start http info server. Address " + addr + + " does not belong to this host. Correct configuration parameter: " + + "hbase.regionserver.info.bindAddress"; + LOG.error(msg); + throw new IOException(msg); + } + // check if auto port bind enabled + boolean auto = this.conf.getBoolean(HConstants.REGIONSERVER_INFO_PORT_AUTO, false); + while (true) { + try { + this.infoServer = new InfoServer(getProcessName(), addr, port, false, this.conf); + infoServer.addPrivilegedServlet("dump", "/dump", getDumpServlet()); + configureInfoServer(infoServer); + this.infoServer.start(); + break; + } catch (BindException e) { + if (!auto) { + // auto bind disabled throw BindException + LOG.error("Failed binding http info server to port: " + port); + throw e; + } + // auto bind enabled, try to use another port + LOG.info("Failed binding http info server to port: " + port); + port++; + LOG.info("Retry starting http info server with port: " + port); + } + } + port = this.infoServer.getPort(); + conf.setInt(HConstants.REGIONSERVER_INFO_PORT, port); + int masterInfoPort = + conf.getInt(HConstants.MASTER_INFO_PORT, HConstants.DEFAULT_MASTER_INFOPORT); + conf.setInt("hbase.master.info.port.orig", masterInfoPort); + conf.setInt(HConstants.MASTER_INFO_PORT, port); + } + + /** + * Sets the abort state if not already set. + * @return True if abortRequested set to True successfully, false if an abort is already in + * progress. + */ + protected final boolean setAbortRequested() { + return abortRequested.compareAndSet(false, true); + } + + @Override + public boolean isStopped() { + return stopped; + } + + @Override + public boolean isAborted() { + return abortRequested.get(); + } + + @Override + public Configuration getConfiguration() { + return conf; + } + + @Override + public AsyncClusterConnection getAsyncClusterConnection() { + return asyncClusterConnection; + } + + @Override + public ZKWatcher getZooKeeper() { + return zooKeeper; + } + + protected final void shutdownChore(ScheduledChore chore) { + if (chore != null) { + chore.shutdown(); + } + } + + protected final void initializeMemStoreChunkCreator(HeapMemoryManager hMemManager) { + if (MemStoreLAB.isEnabled(conf)) { + // MSLAB is enabled. So initialize MemStoreChunkPool + // By this time, the MemstoreFlusher is already initialized. We can get the global limits from + // it. + Pair pair = MemorySizeUtil.getGlobalMemStoreSize(conf); + long globalMemStoreSize = pair.getFirst(); + boolean offheap = pair.getSecond() == MemoryType.NON_HEAP; + // When off heap memstore in use, take full area for chunk pool. + float poolSizePercentage = offheap ? 
1.0F : + conf.getFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, MemStoreLAB.POOL_MAX_SIZE_DEFAULT); + float initialCountPercentage = conf.getFloat(MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY, + MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT); + int chunkSize = conf.getInt(MemStoreLAB.CHUNK_SIZE_KEY, MemStoreLAB.CHUNK_SIZE_DEFAULT); + float indexChunkSizePercent = conf.getFloat(MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_KEY, + MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); + // init the chunkCreator + ChunkCreator.initialize(chunkSize, offheap, globalMemStoreSize, poolSizePercentage, + initialCountPercentage, hMemManager, indexChunkSizePercent); + } + } + + protected abstract void stopChores(); + + protected final void stopChoreService() { + // clean up the scheduled chores + if (choreService != null) { + LOG.info("Shutdown chores and chore service"); + stopChores(); + // cancel the remaining scheduled chores (in case we missed out any) + // TODO: cancel will not cleanup the chores, so we need make sure we do not miss any + choreService.shutdown(); + } + } + + protected final void stopExecutorService() { + if (executorService != null) { + LOG.info("Shutdown executor service"); + executorService.shutdown(); + } + } + + protected final void closeClusterConnection() { + if (asyncClusterConnection != null) { + LOG.info("Close async cluster connection"); + try { + this.asyncClusterConnection.close(); + } catch (IOException e) { + // Although the {@link Closeable} interface throws an {@link + // IOException}, in reality, the implementation would never do that. + LOG.warn("Attempt to close server's AsyncClusterConnection failed.", e); + } + } + } + + protected final void stopInfoServer() { + if (this.infoServer != null) { + LOG.info("Stop info server"); + try { + this.infoServer.stop(); + } catch (Exception e) { + LOG.error("Failed to stop infoServer", e); + } + } + } + + protected final void closeZooKeeper() { + if (this.zooKeeper != null) { + LOG.info("Close zookeeper"); + this.zooKeeper.close(); + } + } + + @Override + public ServerName getServerName() { + return serverName; + } + + @Override + public ChoreService getChoreService() { + return choreService; + } + + /** + * @return Return table descriptors implementation. + */ + public TableDescriptors getTableDescriptors() { + return this.tableDescriptors; + } + + public ExecutorService getExecutorService() { + return executorService; + } + + public AccessChecker getAccessChecker() { + return rpcServices.getAccessChecker(); + } + + public ZKPermissionWatcher getZKPermissionWatcher() { + return rpcServices.getZkPermissionWatcher(); + } + + @Override + public CoordinatedStateManager getCoordinatedStateManager() { + return csm; + } + + @Override + public Connection createConnection(Configuration conf) throws IOException { + User user = UserProvider.instantiate(conf).getCurrent(); + return ConnectionFactory.createConnection(conf, null, user); + } + + /** + * @return Return the rootDir. + */ + public Path getDataRootDir() { + return dataRootDir; + } + + @Override + public FileSystem getFileSystem() { + return dataFs; + } + + /** + * @return Return the walRootDir. + */ + public Path getWALRootDir() { + return walRootDir; + } + + /** + * @return Return the walFs. + */ + public FileSystem getWALFileSystem() { + return walFs; + } + + /** + * @return True if the cluster is up. 
+ */ + public boolean isClusterUp() { + return !clusterMode() || this.clusterStatusTracker.isClusterUp(); + } + + /** + * @return time stamp in millis of when this server was started + */ + public long getStartcode() { + return this.startcode; + } + + public InfoServer getInfoServer() { + return infoServer; + } + + public int getMsgInterval() { + return msgInterval; + } + + /** + * get NamedQueue Provider to add different logs to ringbuffer + * @return NamedQueueRecorder + */ + public NamedQueueRecorder getNamedQueueRecorder() { + return this.namedQueueRecorder; + } + + public RpcServerInterface getRpcServer() { + return rpcServices.getRpcServer(); + } + + public NettyEventLoopGroupConfig getEventLoopGroupConfig() { + return eventLoopGroupConfig; + } + + public R getRpcServices() { + return rpcServices; + } + + @RestrictedApi(explanation = "Should only be called in tests", link = "", + allowedOnPath = ".*/src/test/.*") + public MetaRegionLocationCache getMetaRegionLocationCache() { + return this.metaRegionLocationCache; + } + + /** + * Reload the configuration from disk. + */ + public void updateConfiguration() { + LOG.info("Reloading the configuration from disk."); + // Reload the configuration from disk. + conf.reloadConfiguration(); + configurationManager.notifyAllObservers(conf); + } + + @Override + public String toString() { + return getServerName().toString(); + } + + protected abstract boolean canCreateBaseZNode(); + + protected abstract String getProcessName(); + + protected abstract R createRpcServices() throws IOException; + + protected abstract String getUseThisHostnameInstead(Configuration conf) throws IOException; + + protected abstract void login(UserProvider user, String host) throws IOException; + + protected abstract NamedQueueRecorder createNamedQueueRecord(); + + protected abstract void configureInfoServer(InfoServer infoServer); + + protected abstract Class getDumpServlet(); + + protected abstract boolean canUpdateTableDescriptor(); + + protected abstract boolean cacheTableDescriptor(); + + protected abstract boolean clusterMode(); +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java index 811b7c0c0f1..31b8226cd4a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java @@ -49,7 +49,9 @@ public interface Server extends Abortable, Stoppable { * Important note: this method returns a reference to Connection which is managed * by Server itself, so callers must NOT attempt to close connection obtained. 
*/ - Connection getConnection(); + default Connection getConnection() { + return getAsyncConnection().toConnection(); + } Connection createConnection(Configuration conf) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AnnotationReadingPriorityFunction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AnnotationReadingPriorityFunction.java index 109375b9323..a2d0169010e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AnnotationReadingPriorityFunction.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/AnnotationReadingPriorityFunction.java @@ -21,8 +21,8 @@ import java.lang.reflect.Method; import java.util.HashMap; import java.util.Map; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.hbase.HBaseRpcServicesBase; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.security.User; import org.apache.yetus.audience.InterfaceAudience; @@ -52,7 +52,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader // RegionSpecifier object. Methods can be invoked on the returned object // to figure out whether it is a meta region or not. @InterfaceAudience.Private -public abstract class AnnotationReadingPriorityFunction +public abstract class AnnotationReadingPriorityFunction> implements PriorityFunction { protected final Map annotatedQos; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java index b7d446bc0fe..a3ee71fc6fb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java @@ -24,6 +24,7 @@ import java.util.List; import java.util.concurrent.CountDownLatch; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.HBaseServerBase; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.security.HBasePolicyProvider; @@ -79,7 +80,7 @@ public class NettyRpcServer extends RpcServer { EventLoopGroup eventLoopGroup; Class channelClass; if (server instanceof HRegionServer) { - NettyEventLoopGroupConfig config = ((HRegionServer) server).getEventLoopGroupConfig(); + NettyEventLoopGroupConfig config = ((HBaseServerBase) server).getEventLoopGroupConfig(); eventLoopGroup = config.group(); channelClass = config.serverChannelClass(); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index fed3d06a907..9706149e82d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -60,7 +60,6 @@ import org.apache.hadoop.hbase.CatalogFamilyFormat; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; -import org.apache.hadoop.hbase.client.BalanceRequest; import org.apache.hadoop.hbase.ClusterId; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ClusterMetrics.Option; @@ -68,7 +67,9 @@ import org.apache.hadoop.hbase.ClusterMetricsBuilder; import org.apache.hadoop.hbase.DoNotRetryIOException; import 
org.apache.hadoop.hbase.HBaseIOException; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.HBaseServerBase; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.MetaTableAccessor; @@ -83,6 +84,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; +import org.apache.hadoop.hbase.client.BalanceRequest; import org.apache.hadoop.hbase.client.BalanceResponse; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.CompactionState; @@ -102,6 +104,7 @@ import org.apache.hadoop.hbase.exceptions.MasterStoppedException; import org.apache.hadoop.hbase.executor.ExecutorType; import org.apache.hadoop.hbase.favored.FavoredNodesManager; import org.apache.hadoop.hbase.http.HttpServer; +import org.apache.hadoop.hbase.http.InfoServer; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; @@ -114,6 +117,7 @@ import org.apache.hadoop.hbase.master.assignment.RegionStateStore; import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure; import org.apache.hadoop.hbase.master.balancer.BalancerChore; +import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer; import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore; import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory; import org.apache.hadoop.hbase.master.balancer.MaintenanceLoadBalancer; @@ -166,6 +170,7 @@ import org.apache.hadoop.hbase.mob.MobFileCompactionChore; import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; +import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost; import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager; import org.apache.hadoop.hbase.procedure2.LockedResource; @@ -190,7 +195,6 @@ import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshotNotifierFactory; import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; -import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationLoadSource; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; @@ -199,16 +203,17 @@ import org.apache.hadoop.hbase.replication.ReplicationUtils; import org.apache.hadoop.hbase.replication.SyncReplicationState; import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner; import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner; -import org.apache.hadoop.hbase.replication.regionserver.ReplicationStatus; import org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint; import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer; import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager; import 
org.apache.hadoop.hbase.rsgroup.RSGroupUtil; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.security.SecurityConstants; +import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FutureUtils; @@ -266,8 +271,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.Snapshot * @see org.apache.zookeeper.Watcher */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) -@SuppressWarnings("deprecation") -public class HMaster extends HRegionServer implements MasterServices { +public class HMaster extends HBaseServerBase implements MasterServices { private static final Logger LOG = LoggerFactory.getLogger(HMaster.class); @@ -304,6 +308,8 @@ public class HMaster extends HRegionServer implements MasterServices { public static final int DEFAULT_HBASE_MASTER_CLEANER_INTERVAL = 600 * 1000; + private String clusterId; + // Metrics for the HMaster final MetricsMaster metricsMaster; // file system manager for the master FS operations @@ -435,7 +441,7 @@ public class HMaster extends HRegionServer implements MasterServices { * active one. */ public HMaster(final Configuration conf) throws IOException { - super(conf); + super(conf, "Master"); try { if (conf.getBoolean(MAINTENANCE_MODE, false)) { LOG.info("Detected {}=true via configuration.", MAINTENANCE_MODE); @@ -447,9 +453,10 @@ public class HMaster extends HRegionServer implements MasterServices { maintenanceMode = false; } this.rsFatals = new MemoryBoundedLogMessageBuffer( - conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024)); - LOG.info("hbase.rootdir={}, hbase.cluster.distributed={}", getDataRootDir(), - this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false)); + conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024)); + LOG.info("hbase.rootdir={}, hbase.cluster.distributed={}", + CommonFSUtils.getRootDir(this.conf), + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false)); // Disable usage of meta replicas in the master this.conf.setBoolean(HConstants.USE_META_REPLICAS, false); @@ -491,12 +498,10 @@ public class HMaster extends HRegionServer implements MasterServices { getChoreService().scheduleChore(clusterStatusPublisherChore); } } - this.activeMasterManager = createActiveMasterManager(zooKeeper, serverName, this); - cachedClusterId = new CachedClusterId(this, conf); - this.regionServerTracker = new RegionServerTracker(zooKeeper, this); + this.rpcServices.start(zooKeeper); } catch (Throwable t) { // Make sure we log the exception. HMaster is often started via reflection and the // cause of failed startup is lost. @@ -519,11 +524,17 @@ public class HMaster extends HRegionServer implements MasterServices { return conf.get(MASTER_HOSTNAME_KEY); } + private void registerConfigurationObservers() { + configurationManager.registerObserver(this.rpcServices); + configurationManager.registerObserver(this); + } + // Main run loop. Calls through to the regionserver run loop AFTER becoming active Master; will // block in here until then. 
@Override public void run() { try { + registerConfigurationObservers(); Threads.setDaemonThreadRunning(new Thread(() -> { try { int infoPort = putUpJettyServer(); @@ -538,9 +549,16 @@ public class HMaster extends HRegionServer implements MasterServices { } } }), getName() + ":becomeActiveMaster"); - // Fall in here even if we have been aborted. Need to run the shutdown services and - // the super run call will do this for us. - super.run(); + while (!isStopped() && !isAborted()) { + sleeper.sleep(); + } + stopInfoServer(); + closeClusterConnection(); + stopServiceThreads(); + if (this.rpcServices != null) { + this.rpcServices.stop(); + } + closeZooKeeper(); } finally { if (this.clusterSchemaService != null) { // If on way out, then we are no longer active master. @@ -615,26 +633,16 @@ public class HMaster extends HRegionServer implements MasterServices { @Override protected void login(UserProvider user, String host) throws IOException { try { - super.login(user, host); + user.login(SecurityConstants.REGIONSERVER_KRB_KEYTAB_FILE, + SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, host); } catch (IOException ie) { - user.login(SecurityConstants.MASTER_KRB_KEYTAB_FILE, - SecurityConstants.MASTER_KRB_PRINCIPAL, host); + user.login(SecurityConstants.MASTER_KRB_KEYTAB_FILE, SecurityConstants.MASTER_KRB_PRINCIPAL, + host); } } - /** - * Loop till the server is stopped or aborted. - */ - @Override - protected void waitForMasterActive() { - while (!isStopped() && !isAborted()) { - sleeper.sleep(); - } - } - - @InterfaceAudience.Private public MasterRpcServices getMasterRpcServices() { - return (MasterRpcServices)rpcServices; + return rpcServices; } public boolean balanceSwitch(final boolean b) throws IOException { @@ -661,13 +669,12 @@ public class HMaster extends HRegionServer implements MasterServices { return true; } - @Override - protected RSRpcServices createRpcServices() throws IOException { + protected MasterRpcServices createRpcServices() throws IOException { return new MasterRpcServices(this); } @Override - protected void configureInfoServer() { + protected void configureInfoServer(InfoServer infoServer) { infoServer.addUnprivilegedServlet("master-status", "/master-status", MasterStatusServlet.class); infoServer.setAttribute(MASTER, this); } @@ -860,7 +867,7 @@ public class HMaster extends HRegionServer implements MasterServices { // always initialize the MemStoreLAB as we use a region to store data in master now, see // localStore. 
- initializeMemStoreChunkCreator(); + initializeMemStoreChunkCreator(null); this.fileSystemManager = new MasterFileSystem(conf); this.walManager = new MasterWalManager(this); @@ -1539,7 +1546,6 @@ public class HMaster extends HRegionServer implements MasterServices { } } - @Override protected void stopServiceThreads() { if (masterJettyServer != null) { LOG.info("Stopping master jetty server"); @@ -1549,9 +1555,8 @@ public class HMaster extends HRegionServer implements MasterServices { LOG.error("Failed to stop master jetty server", e); } } - stopChores(); - - super.stopServiceThreads(); + stopChoreService(); + stopExecutorService(); if (cleanerPool != null) { cleanerPool.shutdownNow(); cleanerPool = null; @@ -1680,25 +1685,23 @@ public class HMaster extends HRegionServer implements MasterServices { } } - private void stopChores() { - if (getChoreService() != null) { - shutdownChore(mobFileCleanerChore); - shutdownChore(mobFileCompactionChore); - shutdownChore(balancerChore); - if (regionNormalizerManager != null) { - shutdownChore(regionNormalizerManager.getRegionNormalizerChore()); - } - shutdownChore(clusterStatusChore); - shutdownChore(catalogJanitorChore); - shutdownChore(clusterStatusPublisherChore); - shutdownChore(snapshotQuotaChore); - shutdownChore(logCleaner); - shutdownChore(hfileCleaner); - shutdownChore(replicationBarrierCleaner); - shutdownChore(snapshotCleanerChore); - shutdownChore(hbckChore); - shutdownChore(regionsRecoveryChore); + protected void stopChores() { + shutdownChore(mobFileCleanerChore); + shutdownChore(mobFileCompactionChore); + shutdownChore(balancerChore); + if (regionNormalizerManager != null) { + shutdownChore(regionNormalizerManager.getRegionNormalizerChore()); } + shutdownChore(clusterStatusChore); + shutdownChore(catalogJanitorChore); + shutdownChore(clusterStatusPublisherChore); + shutdownChore(snapshotQuotaChore); + shutdownChore(logCleaner); + shutdownChore(hfileCleaner); + shutdownChore(replicationBarrierCleaner); + shutdownChore(snapshotCleanerChore); + shutdownChore(hbckChore); + shutdownChore(regionsRecoveryChore); } /** @@ -2722,16 +2725,6 @@ public class HMaster extends HRegionServer implements MasterServices { return status; } - @Override - public Optional getActiveMaster() { - return activeMasterManager.getActiveMasterServerName(); - } - - @Override - public List getBackupMasters() { - return activeMasterManager.getBackupMasters(); - } - /** * @return info port of active master or 0 if any exception occurs. */ @@ -2747,11 +2740,6 @@ public class HMaster extends HRegionServer implements MasterServices { return activeMasterManager.getBackupMasterInfoPort(sn); } - @Override - public Collection getRegionServers() { - return regionServerTracker.getRegionServers(); - } - /** * The set of loaded coprocessors is stored in a static set. 
Since it's * statically allocated, it does not require that HMaster's cpHost be @@ -2842,11 +2830,6 @@ public class HMaster extends HRegionServer implements MasterServices { } } - @Override - public ZKWatcher getZooKeeper() { - return zooKeeper; - } - @Override public MasterCoprocessorHost getMasterCoprocessorHost() { return cpHost; @@ -2927,15 +2910,18 @@ public class HMaster extends HRegionServer implements MasterServices { @Override public void stop(String msg) { - if (!isStopped()) { - super.stop(msg); + if (!this.stopped) { + LOG.info("***** STOPPING master '" + this + "' *****"); + this.stopped = true; + LOG.info("STOPPED: " + msg); + // Wakes run() if it is sleeping + sleeper.skipSleepCycle(); if (this.activeMasterManager != null) { this.activeMasterManager.stop(); } } } - @InterfaceAudience.Private protected void checkServiceStarted() throws ServerNotRunningYetException { if (!serviceStarted) { throw new ServerNotRunningYetException("Server is not running yet"); @@ -2987,8 +2973,6 @@ public class HMaster extends HRegionServer implements MasterServices { * * @return true if master is ready to go, false if not. */ - - @Override public boolean isOnline() { return serviceStarted; } @@ -3003,7 +2987,6 @@ public class HMaster extends HRegionServer implements MasterServices { return maintenanceMode; } - @InterfaceAudience.Private public void setInitialized(boolean isInitialized) { procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized); } @@ -3871,28 +3854,19 @@ public class HMaster extends HRegionServer implements MasterServices { return this.snapshotQuotaChore; } + public ActiveMasterManager getActiveMasterManager() { + return activeMasterManager; + } + @Override public SyncReplicationReplayWALManager getSyncReplicationReplayWALManager() { return this.syncReplicationReplayWALManager; } - @Override - public Map getWalGroupsReplicationStatus() { - return new HashMap<>(); - } - public HbckChore getHbckChore() { return this.hbckChore; } - @Override - public String getClusterId() { - if (activeMaster) { - return super.getClusterId(); - } - return cachedClusterId.getFromCacheOrFetch(); - } - @Override public void runReplicationBarrierCleaner() { ReplicationBarrierCleaner rbc = this.replicationBarrierCleaner; @@ -3959,4 +3933,58 @@ public class HMaster extends HRegionServer implements MasterServices { MasterRegion getMasterRegion() { return masterRegion; } + + @Override + public void onConfigurationChange(Configuration newConf) { + try { + Superusers.initialize(newConf); + } catch (IOException e) { + LOG.warn("Failed to initialize SuperUsers on reloading of the configuration"); + } + } + + @Override + protected NamedQueueRecorder createNamedQueueRecord() { + final boolean isBalancerDecisionRecording = conf + .getBoolean(BaseLoadBalancer.BALANCER_DECISION_BUFFER_ENABLED, + BaseLoadBalancer.DEFAULT_BALANCER_DECISION_BUFFER_ENABLED); + final boolean isBalancerRejectionRecording = conf + .getBoolean(BaseLoadBalancer.BALANCER_REJECTION_BUFFER_ENABLED, + BaseLoadBalancer.DEFAULT_BALANCER_REJECTION_BUFFER_ENABLED); + if (isBalancerDecisionRecording || isBalancerRejectionRecording) { + return NamedQueueRecorder.getInstance(conf); + } else { + return null; + } + } + + @Override + protected boolean clusterMode() { + return true; + } + + public String getClusterId() { + if (activeMaster) { + return clusterId; + } + return cachedClusterId.getFromCacheOrFetch(); + } + + public Optional getActiveMaster() { + return activeMasterManager.getActiveMasterServerName(); + } + + public List 
getBackupMasters() { + return activeMasterManager.getBackupMasters(); + } + + @Override + public Collection getRegionServers() { + return regionServerTracker.getRegionServers(); + } + + @Override + public List getMetaLocations() { + return metaRegionLocationCache.getMetaRegionLocations(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index a8446c327e0..f144850ade3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -17,14 +17,10 @@ */ package org.apache.hadoop.hbase.master; -import static org.apache.hadoop.hbase.master.MasterWalManager.META_FILTER; -import java.io.FileNotFoundException; import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; -import java.net.BindException; import java.net.InetAddress; -import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -35,13 +31,12 @@ import java.util.Map.Entry; import java.util.Set; import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ClusterMetricsBuilder; import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HBaseRpcServicesBase; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.ServerMetricsBuilder; import org.apache.hadoop.hbase.ServerName; @@ -62,15 +57,11 @@ import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; -import org.apache.hadoop.hbase.io.ByteBuffAllocator; -import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.PriorityFunction; import org.apache.hadoop.hbase.ipc.QosPriority; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; -import org.apache.hadoop.hbase.ipc.RpcServerFactory; -import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.master.assignment.RegionStates; @@ -98,8 +89,7 @@ import org.apache.hadoop.hbase.quotas.MasterQuotaManager; import org.apache.hadoop.hbase.quotas.QuotaObserverChore; import org.apache.hadoop.hbase.quotas.QuotaUtil; import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot; -import org.apache.hadoop.hbase.regionserver.RSRpcServices; -import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory; +import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory; import org.apache.hadoop.hbase.replication.ReplicationException; import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; @@ -119,15 +109,18 @@ import org.apache.hadoop.hbase.security.visibility.VisibilityController; import 
org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.DNS; +import org.apache.hadoop.hbase.util.DNS.ServerType; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.ForeignExceptionUtil; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; @@ -137,6 +130,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; import org.apache.hbase.thirdparty.com.google.protobuf.Service; import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos; @@ -150,10 +144,43 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Has import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type; import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.RevokeResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactionSwitchResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds; @@ -321,6 +348,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaSta import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest; @@ -392,16 +421,17 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.VisibilityLabelsProtos. * Implements the master RPC services. 
*/ @InterfaceAudience.Private -@SuppressWarnings("deprecation") -public class MasterRpcServices extends RSRpcServices implements - MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface, - LockService.BlockingInterface, HbckService.BlockingInterface { +public class MasterRpcServices extends HBaseRpcServicesBase<HMaster> + implements MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface, + LockService.BlockingInterface, HbckService.BlockingInterface { private static final Logger LOG = LoggerFactory.getLogger(MasterRpcServices.class.getName()); private static final Logger AUDITLOG = LoggerFactory.getLogger("SecurityLogger."+MasterRpcServices.class.getName()); - private final HMaster master; + /** RPC scheduler to use for the master. */ + public static final String MASTER_RPC_SCHEDULER_FACTORY_CLASS = + "hbase.master.rpc.scheduler.factory.class"; /** * @return Subset of configuration to pass initializing regionservers: e.g. @@ -418,42 +448,43 @@ public class MasterRpcServices extends RSRpcServices implements final RegionServerStartupResponse.Builder resp, final String key) { NameStringPair.Builder entry = NameStringPair.newBuilder() .setName(key) - .setValue(master.getConfiguration().get(key)); + .setValue(server.getConfiguration().get(key)); resp.addMapEntries(entry.build()); return resp; } public MasterRpcServices(HMaster m) throws IOException { - super(m); - master = m; + super(m, m.getProcessName()); } @Override - protected Class<? extends RpcSchedulerFactory> getRpcSchedulerFactoryClass() { - Configuration conf = getConfiguration(); - if (conf != null) { - return conf.getClass(MASTER_RPC_SCHEDULER_FACTORY_CLASS, super.getRpcSchedulerFactoryClass()); - } else { - return super.getRpcSchedulerFactoryClass(); - } + protected boolean defaultReservoirEnabled() { + return false; } @Override - protected RpcServerInterface createRpcServer(final Server server, - final RpcSchedulerFactory rpcSchedulerFactory, final InetSocketAddress bindAddress, - final String name) throws IOException { - final Configuration conf = regionServer.getConfiguration(); - // RpcServer at HM by default enable ByteBufferPool iff HM having user table region in it - boolean reservoirEnabled = conf.getBoolean(ByteBuffAllocator.ALLOCATOR_POOL_ENABLED_KEY, false); - try { - return RpcServerFactory.createRpcServer(server, name, getServices(), - bindAddress, // use final bindAddress for this server. - conf, rpcSchedulerFactory.create(conf, this, server), reservoirEnabled); - } catch (BindException be) { - throw new IOException(be.getMessage() + ". To switch ports use the '" - + HConstants.MASTER_PORT + "' configuration property.", - be.getCause() != null ? 
be.getCause() : be); - } + protected ServerType getDNSServerType() { + return DNS.ServerType.MASTER; + } + + @Override + protected String getHostname(Configuration conf, String defaultHostname) { + return conf.get("hbase.master.ipc.address", defaultHostname); + } + + @Override + protected String getPortConfigName() { + return HConstants.MASTER_PORT; + } + + @Override + protected int getDefaultPort() { + return HConstants.DEFAULT_MASTER_PORT; + } + + @Override + protected Class getRpcSchedulerFactoryClass(Configuration conf) { + return conf.getClass(MASTER_RPC_SCHEDULER_FACTORY_CLASS, SimpleRpcSchedulerFactory.class); } @Override @@ -472,7 +503,7 @@ public class MasterRpcServices extends RSRpcServices implements */ private void rpcPreCheck(String requestName) throws ServiceException { try { - master.checkInitialized(); + server.checkInitialized(); requirePermission(requestName, Permission.Action.ADMIN); } catch (IOException ioe) { throw new ServiceException(ioe); @@ -491,28 +522,28 @@ public class MasterRpcServices extends RSRpcServices implements * @return old balancer switch */ boolean switchBalancer(final boolean b, BalanceSwitchMode mode) throws IOException { - boolean oldValue = master.loadBalancerTracker.isBalancerOn(); + boolean oldValue = server.loadBalancerTracker.isBalancerOn(); boolean newValue = b; try { - if (master.cpHost != null) { - master.cpHost.preBalanceSwitch(newValue); + if (server.cpHost != null) { + server.cpHost.preBalanceSwitch(newValue); } try { if (mode == BalanceSwitchMode.SYNC) { - synchronized (master.getLoadBalancer()) { - master.loadBalancerTracker.setBalancerOn(newValue); + synchronized (server.getLoadBalancer()) { + server.loadBalancerTracker.setBalancerOn(newValue); } } else { - master.loadBalancerTracker.setBalancerOn(newValue); + server.loadBalancerTracker.setBalancerOn(newValue); } } catch (KeeperException ke) { throw new IOException(ke); } - LOG.info(master.getClientIdAuditPrefix() + " set balanceSwitch=" + newValue); - if (master.cpHost != null) { - master.cpHost.postBalanceSwitch(oldValue, newValue); + LOG.info(server.getClientIdAuditPrefix() + " set balanceSwitch=" + newValue); + if (server.cpHost != null) { + server.cpHost.postBalanceSwitch(oldValue, newValue); } - master.getLoadBalancer().updateBalancerStatus(newValue); + server.getLoadBalancer().updateBalancerStatus(newValue); } catch (IOException ioe) { LOG.warn("Error flipping balance switch", ioe); } @@ -541,21 +572,30 @@ public class MasterRpcServices extends RSRpcServices implements HbckService.BlockingInterface.class)); bssi.add(new BlockingServiceAndInterface(ClientMetaService.newReflectiveBlockingService(this), ClientMetaService.BlockingInterface.class)); - bssi.addAll(super.getServices()); + bssi.add(new BlockingServiceAndInterface(AdminService.newReflectiveBlockingService(this), + AdminService.BlockingInterface.class)); return bssi; } + void start(ZKWatcher zkWatcher) { + internalStart(zkWatcher); + } + + void stop() { + internalStop(); + } + @Override @QosPriority(priority = HConstants.ADMIN_QOS) public GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(RpcController controller, GetLastFlushedSequenceIdRequest request) throws ServiceException { try { - master.checkServiceStarted(); + server.checkServiceStarted(); } catch (IOException ioe) { throw new ServiceException(ioe); } byte[] encodedRegionName = request.getRegionName().toByteArray(); - RegionStoreSequenceIds ids = master.getServerManager() + RegionStoreSequenceIds ids = server.getServerManager() 
.getLastFlushedSequenceId(encodedRegionName); return ResponseConverter.buildGetLastFlushedSequenceIdResponse(ids); } @@ -564,7 +604,7 @@ public class MasterRpcServices extends RSRpcServices implements public RegionServerReportResponse regionServerReport(RpcController controller, RegionServerReportRequest request) throws ServiceException { try { - master.checkServiceStarted(); + server.checkServiceStarted(); int versionNumber = 0; String version = "0.0.0"; VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo(); @@ -574,20 +614,20 @@ public class MasterRpcServices extends RSRpcServices implements } ClusterStatusProtos.ServerLoad sl = request.getLoad(); ServerName serverName = ProtobufUtil.toServerName(request.getServer()); - ServerMetrics oldLoad = master.getServerManager().getLoad(serverName); + ServerMetrics oldLoad = server.getServerManager().getLoad(serverName); ServerMetrics newLoad = ServerMetricsBuilder.toServerMetrics(serverName, versionNumber, version, sl); - master.getServerManager().regionServerReport(serverName, newLoad); - master.getAssignmentManager().reportOnlineRegions(serverName, + server.getServerManager().regionServerReport(serverName, newLoad); + server.getAssignmentManager().reportOnlineRegions(serverName, newLoad.getRegionMetrics().keySet()); - if (sl != null && master.metricsMaster != null) { + if (sl != null && server.metricsMaster != null) { // Up our metrics. - master.metricsMaster.incrementRequests( + server.metricsMaster.incrementRequests( sl.getTotalNumberOfRequests() - (oldLoad != null ? oldLoad.getRequestCount() : 0)); - master.metricsMaster.incrementReadRequests(sl.getReadRequestsCount() - + server.metricsMaster.incrementReadRequests(sl.getReadRequestsCount() - (oldLoad != null ? oldLoad.getReadRequestsCount() : 0)); - master.metricsMaster.incrementWriteRequests(sl.getWriteRequestsCount() - + server.metricsMaster.incrementWriteRequests(sl.getWriteRequestsCount() - (oldLoad != null ? 
oldLoad.getWriteRequestsCount() : 0)); } } catch (IOException ioe) { @@ -601,7 +641,7 @@ public class MasterRpcServices extends RSRpcServices implements RegionServerStartupRequest request) throws ServiceException { // Register with server manager try { - master.checkServiceStarted(); + server.checkServiceStarted(); int versionNumber = 0; String version = "0.0.0"; VersionInfo versionInfo = VersionInfoUtil.getCurrentClientVersionInfo(); @@ -609,11 +649,11 @@ public class MasterRpcServices extends RSRpcServices implements version = versionInfo.getVersion(); versionNumber = VersionInfoUtil.getVersionNumber(versionInfo); } - InetAddress ia = master.getRemoteInetAddress(request.getPort(), request.getServerStartCode()); + InetAddress ia = server.getRemoteInetAddress(request.getPort(), request.getServerStartCode()); // if regionserver passed hostname to use, // then use it instead of doing a reverse DNS lookup ServerName rs = - master.getServerManager().regionServerStartup(request, versionNumber, version, ia); + server.getServerManager().regionServerStartup(request, versionNumber, version, ia); // Send back some config info RegionServerStartupResponse.Builder resp = createConfigurationSubset(); @@ -634,7 +674,7 @@ public class MasterRpcServices extends RSRpcServices implements ServerName sn = ProtobufUtil.toServerName(request.getServer()); String msg = sn + " reported a fatal error:\n" + errorText; LOG.warn(msg); - master.rsFatals.add(msg); + server.rsFatals.add(msg); return ReportRSFatalErrorResponse.newBuilder().build(); } @@ -642,7 +682,7 @@ public class MasterRpcServices extends RSRpcServices implements public AddColumnResponse addColumn(RpcController controller, AddColumnRequest req) throws ServiceException { try { - long procId = master.addColumn( + long procId = server.addColumn( ProtobufUtil.toTableName(req.getTableName()), ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()), req.getNonceGroup(), @@ -662,7 +702,7 @@ public class MasterRpcServices extends RSRpcServices implements public AssignRegionResponse assignRegion(RpcController controller, AssignRegionRequest req) throws ServiceException { try { - master.checkInitialized(); + server.checkInitialized(); final RegionSpecifierType type = req.getRegion().getType(); if (type != RegionSpecifierType.REGION_NAME) { @@ -671,19 +711,19 @@ public class MasterRpcServices extends RSRpcServices implements } final byte[] regionName = req.getRegion().getValue().toByteArray(); - final RegionInfo regionInfo = master.getAssignmentManager().getRegionInfo(regionName); + final RegionInfo regionInfo = server.getAssignmentManager().getRegionInfo(regionName); if (regionInfo == null) { throw new UnknownRegionException(Bytes.toStringBinary(regionName)); } final AssignRegionResponse arr = AssignRegionResponse.newBuilder().build(); - if (master.cpHost != null) { - master.cpHost.preAssign(regionInfo); + if (server.cpHost != null) { + server.cpHost.preAssign(regionInfo); } - LOG.info(master.getClientIdAuditPrefix() + " assign " + regionInfo.getRegionNameAsString()); - master.getAssignmentManager().assign(regionInfo); - if (master.cpHost != null) { - master.cpHost.postAssign(regionInfo); + LOG.info(server.getClientIdAuditPrefix() + " assign " + regionInfo.getRegionNameAsString()); + server.getAssignmentManager().assign(regionInfo); + if (server.cpHost != null) { + server.cpHost.postAssign(regionInfo); } return arr; } catch (IOException ioe) { @@ -691,12 +731,11 @@ public class MasterRpcServices extends RSRpcServices implements } } - @Override public 
MasterProtos.BalanceResponse balance(RpcController controller, - MasterProtos.BalanceRequest request) throws ServiceException { + MasterProtos.BalanceRequest request) throws ServiceException { try { - return ProtobufUtil.toBalanceResponse(master.balance(ProtobufUtil.toBalanceRequest(request))); + return ProtobufUtil.toBalanceResponse(server.balance(ProtobufUtil.toBalanceRequest(request))); } catch (IOException ex) { throw new ServiceException(ex); } @@ -706,7 +745,7 @@ public class MasterRpcServices extends RSRpcServices implements public CreateNamespaceResponse createNamespace(RpcController controller, CreateNamespaceRequest request) throws ServiceException { try { - long procId = master.createNamespace( + long procId = server.createNamespace( ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()), request.getNonceGroup(), request.getNonce()); @@ -723,8 +762,8 @@ public class MasterRpcServices extends RSRpcServices implements byte [][] splitKeys = ProtobufUtil.getSplitKeysArray(req); try { long procId = - master.createTable(tableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce()); - LOG.info(master.getClientIdAuditPrefix() + " procedure request for creating table: " + + server.createTable(tableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce()); + LOG.info(server.getClientIdAuditPrefix() + " procedure request for creating table: " + req.getTableSchema().getTableName() + " procId is: " + procId); return CreateTableResponse.newBuilder().setProcId(procId).build(); } catch (IOException ioe) { @@ -736,7 +775,7 @@ public class MasterRpcServices extends RSRpcServices implements public DeleteColumnResponse deleteColumn(RpcController controller, DeleteColumnRequest req) throws ServiceException { try { - long procId = master.deleteColumn( + long procId = server.deleteColumn( ProtobufUtil.toTableName(req.getTableName()), req.getColumnName().toByteArray(), req.getNonceGroup(), @@ -756,7 +795,7 @@ public class MasterRpcServices extends RSRpcServices implements public DeleteNamespaceResponse deleteNamespace(RpcController controller, DeleteNamespaceRequest request) throws ServiceException { try { - long procId = master.deleteNamespace( + long procId = server.deleteNamespace( request.getNamespaceName(), request.getNonceGroup(), request.getNonce()); @@ -777,11 +816,11 @@ public class MasterRpcServices extends RSRpcServices implements public DeleteSnapshotResponse deleteSnapshot(RpcController controller, DeleteSnapshotRequest request) throws ServiceException { try { - master.checkInitialized(); - master.snapshotManager.checkSnapshotSupport(); + server.checkInitialized(); + server.snapshotManager.checkSnapshotSupport(); - LOG.info(master.getClientIdAuditPrefix() + " delete " + request.getSnapshot()); - master.snapshotManager.deleteSnapshot(request.getSnapshot()); + LOG.info(server.getClientIdAuditPrefix() + " delete " + request.getSnapshot()); + server.snapshotManager.deleteSnapshot(request.getSnapshot()); return DeleteSnapshotResponse.newBuilder().build(); } catch (IOException e) { throw new ServiceException(e); @@ -792,7 +831,7 @@ public class MasterRpcServices extends RSRpcServices implements public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest request) throws ServiceException { try { - long procId = master.deleteTable(ProtobufUtil.toTableName( + long procId = server.deleteTable(ProtobufUtil.toTableName( request.getTableName()), request.getNonceGroup(), request.getNonce()); return DeleteTableResponse.newBuilder().setProcId(procId).build(); } 
catch (IOException ioe) { @@ -804,7 +843,7 @@ public class MasterRpcServices extends RSRpcServices implements public TruncateTableResponse truncateTable(RpcController controller, TruncateTableRequest request) throws ServiceException { try { - long procId = master.truncateTable( + long procId = server.truncateTable( ProtobufUtil.toTableName(request.getTableName()), request.getPreserveSplits(), request.getNonceGroup(), @@ -819,7 +858,7 @@ public class MasterRpcServices extends RSRpcServices implements public DisableTableResponse disableTable(RpcController controller, DisableTableRequest request) throws ServiceException { try { - long procId = master.disableTable( + long procId = server.disableTable( ProtobufUtil.toTableName(request.getTableName()), request.getNonceGroup(), request.getNonce()); @@ -834,7 +873,7 @@ public class MasterRpcServices extends RSRpcServices implements EnableCatalogJanitorRequest req) throws ServiceException { rpcPreCheck("enableCatalogJanitor"); return EnableCatalogJanitorResponse.newBuilder().setPrevValue( - master.catalogJanitorChore.setEnabled(req.getEnable())).build(); + server.catalogJanitorChore.setEnabled(req.getEnable())).build(); } @Override @@ -843,9 +882,9 @@ public class MasterRpcServices extends RSRpcServices implements rpcPreCheck("setCleanerChoreRunning"); boolean prevValue = - master.getLogCleaner().getEnabled() && master.getHFileCleaner().getEnabled(); - master.getLogCleaner().setEnabled(req.getOn()); - master.getHFileCleaner().setEnabled(req.getOn()); + server.getLogCleaner().getEnabled() && server.getHFileCleaner().getEnabled(); + server.getLogCleaner().setEnabled(req.getOn()); + server.getHFileCleaner().setEnabled(req.getOn()); return SetCleanerChoreRunningResponse.newBuilder().setPrevValue(prevValue).build(); } @@ -853,7 +892,7 @@ public class MasterRpcServices extends RSRpcServices implements public EnableTableResponse enableTable(RpcController controller, EnableTableRequest request) throws ServiceException { try { - long procId = master.enableTable( + long procId = server.enableTable( ProtobufUtil.toTableName(request.getTableName()), request.getNonceGroup(), request.getNonce()); @@ -867,12 +906,12 @@ public class MasterRpcServices extends RSRpcServices implements public MergeTableRegionsResponse mergeTableRegions( RpcController c, MergeTableRegionsRequest request) throws ServiceException { try { - master.checkInitialized(); + server.checkInitialized(); } catch (IOException ioe) { throw new ServiceException(ioe); } - RegionStates regionStates = master.getAssignmentManager().getRegionStates(); + RegionStates regionStates = server.getAssignmentManager().getRegionStates(); RegionInfo[] regionsToMerge = new RegionInfo[request.getRegionCount()]; for (int i = 0; i < request.getRegionCount(); i++) { @@ -891,7 +930,7 @@ public class MasterRpcServices extends RSRpcServices implements } try { - long procId = master.mergeRegions( + long procId = server.mergeRegions( regionsToMerge, request.getForcible(), request.getNonceGroup(), @@ -906,7 +945,7 @@ public class MasterRpcServices extends RSRpcServices implements public SplitTableRegionResponse splitRegion(final RpcController controller, final SplitTableRegionRequest request) throws ServiceException { try { - long procId = master.splitRegion( + long procId = server.splitRegion( ProtobufUtil.toRegionInfo(request.getRegionInfo()), request.hasSplitRow() ? 
request.getSplitRow().toByteArray() : null, request.getNonceGroup(), @@ -926,13 +965,13 @@ public class MasterRpcServices extends RSRpcServices implements ClientProtos.CoprocessorServiceCall call = request.getCall(); String serviceName = call.getServiceName(); String methodName = call.getMethodName(); - if (!master.coprocessorServiceHandlers.containsKey(serviceName)) { + if (!server.coprocessorServiceHandlers.containsKey(serviceName)) { throw new UnknownProtocolException(null, "No registered Master Coprocessor Endpoint found for " + serviceName + ". Has it been enabled?"); } - Service service = master.coprocessorServiceHandlers.get(serviceName); + Service service = server.coprocessorServiceHandlers.get(serviceName); ServiceDescriptor serviceDesc = service.getDescriptorForType(); MethodDescriptor methodDesc = CoprocessorRpcUtils.getMethodDescriptor(methodName, serviceDesc); @@ -971,15 +1010,15 @@ public class MasterRpcServices extends RSRpcServices implements public ExecProcedureResponse execProcedure(RpcController controller, ExecProcedureRequest request) throws ServiceException { try { - master.checkInitialized(); + server.checkInitialized(); ProcedureDescription desc = request.getProcedure(); - MasterProcedureManager mpm = master.getMasterProcedureManagerHost().getProcedureManager( + MasterProcedureManager mpm = server.getMasterProcedureManagerHost().getProcedureManager( desc.getSignature()); if (mpm == null) { throw new ServiceException(new DoNotRetryIOException("The procedure is not registered: " + desc.getSignature())); } - LOG.info(master.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature()); + LOG.info(server.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature()); mpm.checkPermissions(desc, getAccessChecker(), RpcServer.getRequestUser().orElse(null)); mpm.execProcedure(desc); // send back the max amount of time the client should wait for the procedure @@ -1006,11 +1045,11 @@ public class MasterRpcServices extends RSRpcServices implements try { ProcedureDescription desc = request.getProcedure(); MasterProcedureManager mpm = - master.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature()); + server.getMasterProcedureManagerHost().getProcedureManager(desc.getSignature()); if (mpm == null) { throw new ServiceException("The procedure is not registered: " + desc.getSignature()); } - LOG.info(master.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature()); + LOG.info(server.getClientIdAuditPrefix() + " procedure request for: " + desc.getSignature()); byte[] data = mpm.execProcedureWithRet(desc); ExecProcedureResponse.Builder builder = ExecProcedureResponse.newBuilder(); // set return data if available @@ -1033,7 +1072,7 @@ public class MasterRpcServices extends RSRpcServices implements // since it queries this method to figure cluster version. hbck2 wants to be able to work // against Master even if it is 'initializing' so it can do fixup. 
response.setClusterStatus(ClusterMetricsBuilder.toClusterStatus( - master.getClusterMetrics(ClusterMetricsBuilder.toOptions(req.getOptionsList())))); + server.getClusterMetrics(ClusterMetricsBuilder.toOptions(req.getOptionsList())))); } catch (IOException e) { throw new ServiceException(e); } @@ -1047,9 +1086,9 @@ public class MasterRpcServices extends RSRpcServices implements public GetCompletedSnapshotsResponse getCompletedSnapshots(RpcController controller, GetCompletedSnapshotsRequest request) throws ServiceException { try { - master.checkInitialized(); + server.checkInitialized(); GetCompletedSnapshotsResponse.Builder builder = GetCompletedSnapshotsResponse.newBuilder(); - List snapshots = master.snapshotManager.getCompletedSnapshots(); + List snapshots = server.snapshotManager.getCompletedSnapshots(); // convert to protobuf for (SnapshotDescription snapshot : snapshots) { @@ -1067,7 +1106,7 @@ public class MasterRpcServices extends RSRpcServices implements throws ServiceException { try { return ListNamespacesResponse.newBuilder() - .addAllNamespaceName(master.listNamespaces()) + .addAllNamespaceName(server.listNamespaces()) .build(); } catch (IOException e) { throw new ServiceException(e); @@ -1081,7 +1120,7 @@ public class MasterRpcServices extends RSRpcServices implements try { return GetNamespaceDescriptorResponse.newBuilder() .setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor( - master.getNamespace(request.getNamespaceName()))) + server.getNamespace(request.getNamespaceName()))) .build(); } catch (IOException e) { throw new ServiceException(e); @@ -1106,8 +1145,8 @@ public class MasterRpcServices extends RSRpcServices implements TableName tableName = ProtobufUtil.toTableName(req.getTableName()); try { - master.checkInitialized(); - Pair pair = master.getAssignmentManager().getReopenStatus(tableName); + server.checkInitialized(); + Pair pair = server.getAssignmentManager().getReopenStatus(tableName); GetSchemaAlterStatusResponse.Builder ret = GetSchemaAlterStatusResponse.newBuilder(); ret.setYetToUpdateRegions(pair.getFirst()); ret.setTotalRegions(pair.getSecond()); @@ -1129,7 +1168,7 @@ public class MasterRpcServices extends RSRpcServices implements public GetTableDescriptorsResponse getTableDescriptors(RpcController c, GetTableDescriptorsRequest req) throws ServiceException { try { - master.checkInitialized(); + server.checkInitialized(); final String regex = req.hasRegex() ? req.getRegex() : null; final String namespace = req.hasNamespace() ? req.getNamespace() : null; @@ -1141,7 +1180,7 @@ public class MasterRpcServices extends RSRpcServices implements } } - List descriptors = master.listTableDescriptors(namespace, regex, + List descriptors = server.listTableDescriptors(namespace, regex, tableNameList, req.getIncludeSysTables()); GetTableDescriptorsResponse.Builder builder = GetTableDescriptorsResponse.newBuilder(); @@ -1168,11 +1207,11 @@ public class MasterRpcServices extends RSRpcServices implements public GetTableNamesResponse getTableNames(RpcController controller, GetTableNamesRequest req) throws ServiceException { try { - master.checkServiceStarted(); + server.checkServiceStarted(); final String regex = req.hasRegex() ? req.getRegex() : null; final String namespace = req.hasNamespace() ? 
req.getNamespace() : null; - List tableNames = master.listTableNames(namespace, regex, + List tableNames = server.listTableNames(namespace, regex, req.getIncludeSysTables()); GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder(); @@ -1192,9 +1231,9 @@ public class MasterRpcServices extends RSRpcServices implements public GetTableStateResponse getTableState(RpcController controller, GetTableStateRequest request) throws ServiceException { try { - master.checkServiceStarted(); + server.checkServiceStarted(); TableName tableName = ProtobufUtil.toTableName(request.getTableName()); - TableState ts = master.getTableStateManager().getTableState(tableName); + TableState ts = server.getTableStateManager().getTableState(tableName); GetTableStateResponse.Builder builder = GetTableStateResponse.newBuilder(); builder.setTableState(ts.convert()); return builder.build(); @@ -1207,14 +1246,14 @@ public class MasterRpcServices extends RSRpcServices implements public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c, IsCatalogJanitorEnabledRequest req) throws ServiceException { return IsCatalogJanitorEnabledResponse.newBuilder().setValue( - master.isCatalogJanitorEnabled()).build(); + server.isCatalogJanitorEnabled()).build(); } @Override public IsCleanerChoreEnabledResponse isCleanerChoreEnabled(RpcController c, IsCleanerChoreEnabledRequest req) throws ServiceException { - return IsCleanerChoreEnabledResponse.newBuilder().setValue(master.isCleanerChoreEnabled()) + return IsCleanerChoreEnabledResponse.newBuilder().setValue(server.isCleanerChoreEnabled()) .build(); } @@ -1222,9 +1261,9 @@ public class MasterRpcServices extends RSRpcServices implements public IsMasterRunningResponse isMasterRunning(RpcController c, IsMasterRunningRequest req) throws ServiceException { try { - master.checkServiceStarted(); + server.checkServiceStarted(); return IsMasterRunningResponse.newBuilder().setIsMasterRunning( - !master.isStopped()).build(); + !server.isStopped()).build(); } catch (IOException e) { throw new ServiceException(e); } @@ -1239,9 +1278,9 @@ public class MasterRpcServices extends RSRpcServices implements public IsProcedureDoneResponse isProcedureDone(RpcController controller, IsProcedureDoneRequest request) throws ServiceException { try { - master.checkInitialized(); + server.checkInitialized(); ProcedureDescription desc = request.getProcedure(); - MasterProcedureManager mpm = master.getMasterProcedureManagerHost().getProcedureManager( + MasterProcedureManager mpm = server.getMasterProcedureManagerHost().getProcedureManager( desc.getSignature()); if (mpm == null) { throw new ServiceException("The procedure is not registered: " @@ -1275,9 +1314,9 @@ public class MasterRpcServices extends RSRpcServices implements LOG.debug("Checking to see if snapshot from request:" + ClientSnapshotDescriptionUtils.toString(request.getSnapshot()) + " is done"); try { - master.checkInitialized(); + server.checkInitialized(); IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder(); - boolean done = master.snapshotManager.isSnapshotDone(request.getSnapshot()); + boolean done = server.snapshotManager.isSnapshotDone(request.getSnapshot()); builder.setDone(done); return builder.build(); } catch (ForeignException e) { @@ -1292,10 +1331,10 @@ public class MasterRpcServices extends RSRpcServices implements GetProcedureResultRequest request) throws ServiceException { LOG.debug("Checking to see if procedure is done pid=" + request.getProcId()); try { - 
master.checkInitialized(); + server.checkInitialized(); GetProcedureResultResponse.Builder builder = GetProcedureResultResponse.newBuilder(); long procId = request.getProcId(); - ProcedureExecutor executor = master.getMasterProcedureExecutor(); + ProcedureExecutor executor = server.getMasterProcedureExecutor(); Procedure result = executor.getResultOrProcedure(procId); if (result != null) { builder.setSubmittedTime(result.getSubmittedTime()); @@ -1311,7 +1350,7 @@ public class MasterRpcServices extends RSRpcServices implements if (resultData != null) { builder.setResult(UnsafeByteOperations.unsafeWrap(resultData)); } - master.getMasterProcedureExecutor().removeResult(request.getProcId()); + server.getMasterProcedureExecutor().removeResult(request.getProcId()); } else { builder.setState(GetProcedureResultResponse.State.RUNNING); } @@ -1330,7 +1369,7 @@ public class MasterRpcServices extends RSRpcServices implements try { AbortProcedureResponse.Builder response = AbortProcedureResponse.newBuilder(); boolean abortResult = - master.abortProcedure(request.getProcId(), request.getMayInterruptIfRunning()); + server.abortProcedure(request.getProcId(), request.getMayInterruptIfRunning()); response.setIsProcedureAborted(abortResult); return response.build(); } catch (IOException e) { @@ -1344,7 +1383,7 @@ public class MasterRpcServices extends RSRpcServices implements try { ListNamespaceDescriptorsResponse.Builder response = ListNamespaceDescriptorsResponse.newBuilder(); - for(NamespaceDescriptor ns: master.getNamespaces()) { + for(NamespaceDescriptor ns: server.getNamespaces()) { response.addNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(ns)); } return response.build(); @@ -1359,7 +1398,7 @@ public class MasterRpcServices extends RSRpcServices implements GetProceduresRequest request) throws ServiceException { try { final GetProceduresResponse.Builder response = GetProceduresResponse.newBuilder(); - for (Procedure p: master.getProcedures()) { + for (Procedure p: server.getProcedures()) { response.addProcedure(ProcedureUtil.convertToProtoProcedure(p)); } return response.build(); @@ -1375,7 +1414,7 @@ public class MasterRpcServices extends RSRpcServices implements try { final GetLocksResponse.Builder builder = GetLocksResponse.newBuilder(); - for (LockedResource lockedResource: master.getLocks()) { + for (LockedResource lockedResource: server.getLocks()) { builder.addLock(ProcedureUtil.convertToProtoLockedResource(lockedResource)); } @@ -1390,9 +1429,9 @@ public class MasterRpcServices extends RSRpcServices implements ListTableDescriptorsByNamespaceRequest request) throws ServiceException { try { ListTableDescriptorsByNamespaceResponse.Builder b = - ListTableDescriptorsByNamespaceResponse.newBuilder(); - for (TableDescriptor htd : master - .listTableDescriptorsByNamespace(request.getNamespaceName())) { + ListTableDescriptorsByNamespaceResponse.newBuilder(); + for (TableDescriptor htd : server + .listTableDescriptorsByNamespace(request.getNamespaceName())) { b.addTableSchema(ProtobufUtil.toTableSchema(htd)); } return b.build(); @@ -1407,7 +1446,7 @@ public class MasterRpcServices extends RSRpcServices implements try { ListTableNamesByNamespaceResponse.Builder b = ListTableNamesByNamespaceResponse.newBuilder(); - for (TableName tableName: master.listTableNamesByNamespace(request.getNamespaceName())) { + for (TableName tableName: server.listTableNamesByNamespace(request.getNamespaceName())) { b.addTableName(ProtobufUtil.toProtoTableName(tableName)); } return b.build(); @@ -1420,7 +1459,7 @@ 
public class MasterRpcServices extends RSRpcServices implements public ModifyColumnResponse modifyColumn(RpcController controller, ModifyColumnRequest req) throws ServiceException { try { - long procId = master.modifyColumn( + long procId = server.modifyColumn( ProtobufUtil.toTableName(req.getTableName()), ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()), req.getNonceGroup(), @@ -1440,7 +1479,7 @@ public class MasterRpcServices extends RSRpcServices implements public ModifyNamespaceResponse modifyNamespace(RpcController controller, ModifyNamespaceRequest request) throws ServiceException { try { - long procId = master.modifyNamespace( + long procId = server.modifyNamespace( ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()), request.getNonceGroup(), request.getNonce()); @@ -1454,7 +1493,7 @@ public class MasterRpcServices extends RSRpcServices implements public ModifyTableResponse modifyTable(RpcController controller, ModifyTableRequest req) throws ServiceException { try { - long procId = master.modifyTable( + long procId = server.modifyTable( ProtobufUtil.toTableName(req.getTableName()), ProtobufUtil.toTableDescriptor(req.getTableSchema()), req.getNonceGroup(), @@ -1480,8 +1519,8 @@ public class MasterRpcServices extends RSRpcServices implements } try { - master.checkInitialized(); - master.move(encodedRegionName, destServerName); + server.checkInitialized(); + server.move(encodedRegionName, destServerName); } catch (IOException ioe) { throw new ServiceException(ioe); } @@ -1499,7 +1538,7 @@ public class MasterRpcServices extends RSRpcServices implements public OfflineRegionResponse offlineRegion(RpcController controller, OfflineRegionRequest request) throws ServiceException { try { - master.checkInitialized(); + server.checkInitialized(); final RegionSpecifierType type = request.getRegion().getType(); if (type != RegionSpecifierType.REGION_NAME) { @@ -1508,18 +1547,18 @@ public class MasterRpcServices extends RSRpcServices implements } final byte[] regionName = request.getRegion().getValue().toByteArray(); - final RegionInfo hri = master.getAssignmentManager().getRegionInfo(regionName); + final RegionInfo hri = server.getAssignmentManager().getRegionInfo(regionName); if (hri == null) { throw new UnknownRegionException(Bytes.toStringBinary(regionName)); } - if (master.cpHost != null) { - master.cpHost.preRegionOffline(hri); + if (server.cpHost != null) { + server.cpHost.preRegionOffline(hri); } - LOG.info(master.getClientIdAuditPrefix() + " offline " + hri.getRegionNameAsString()); - master.getAssignmentManager().offlineRegion(hri); - if (master.cpHost != null) { - master.cpHost.postRegionOffline(hri); + LOG.info(server.getClientIdAuditPrefix() + " offline " + hri.getRegionNameAsString()); + server.getAssignmentManager().offlineRegion(hri); + if (server.cpHost != null) { + server.cpHost.postRegionOffline(hri); } } catch (IOException ioe) { throw new ServiceException(ioe); @@ -1544,7 +1583,7 @@ public class MasterRpcServices extends RSRpcServices implements public RestoreSnapshotResponse restoreSnapshot(RpcController controller, RestoreSnapshotRequest request) throws ServiceException { try { - long procId = master.restoreSnapshot(request.getSnapshot(), request.getNonceGroup(), + long procId = server.restoreSnapshot(request.getSnapshot(), request.getNonceGroup(), request.getNonce(), request.getRestoreACL()); return RestoreSnapshotResponse.newBuilder().setProcId(procId).build(); } catch (ForeignException e) { @@ -1559,7 +1598,7 @@ public class MasterRpcServices 
extends RSRpcServices implements RpcController controller, SetSnapshotCleanupRequest request) throws ServiceException { try { - master.checkInitialized(); + server.checkInitialized(); final boolean enabled = request.getEnabled(); final boolean isSynchronous = request.hasSynchronous() && request.getSynchronous(); final boolean prevSnapshotCleanupRunning = this.switchSnapshotCleanup(enabled, isSynchronous); @@ -1575,8 +1614,8 @@ public class MasterRpcServices extends RSRpcServices implements RpcController controller, IsSnapshotCleanupEnabledRequest request) throws ServiceException { try { - master.checkInitialized(); - final boolean isSnapshotCleanupEnabled = master.snapshotCleanupTracker + server.checkInitialized(); + final boolean isSnapshotCleanupEnabled = server.snapshotCleanupTracker .isSnapshotCleanupEnabled(); return IsSnapshotCleanupEnabledResponse.newBuilder() .setEnabled(isSnapshotCleanupEnabled).build(); @@ -1595,9 +1634,9 @@ public class MasterRpcServices extends RSRpcServices implements */ private synchronized boolean switchSnapshotCleanup(final boolean enabledNewVal, final boolean synchronous) { - final boolean oldValue = master.snapshotCleanupTracker.isSnapshotCleanupEnabled(); - master.switchSnapshotCleanup(enabledNewVal, synchronous); - LOG.info("{} Successfully set snapshot cleanup to {}", master.getClientIdAuditPrefix(), + final boolean oldValue = server.snapshotCleanupTracker.isSnapshotCleanupEnabled(); + server.switchSnapshotCleanup(enabledNewVal, synchronous); + LOG.info("{} Successfully set snapshot cleanup to {}", server.getClientIdAuditPrefix(), enabledNewVal); return oldValue; } @@ -1609,7 +1648,7 @@ public class MasterRpcServices extends RSRpcServices implements rpcPreCheck("runCatalogScan"); try { return ResponseConverter.buildRunCatalogScanResponse( - this.master.catalogJanitorChore.scan()); + this.server.catalogJanitorChore.scan()); } catch (IOException ioe) { throw new ServiceException(ioe); } @@ -1619,7 +1658,7 @@ public class MasterRpcServices extends RSRpcServices implements public RunCleanerChoreResponse runCleanerChore(RpcController c, RunCleanerChoreRequest req) throws ServiceException { rpcPreCheck("runCleanerChore"); - boolean result = master.getHFileCleaner().runCleaner() && master.getLogCleaner().runCleaner(); + boolean result = server.getHFileCleaner().runCleaner() && server.getLogCleaner().runCleaner(); return ResponseConverter.buildRunCleanerChoreResponse(result); } @@ -1627,9 +1666,9 @@ public class MasterRpcServices extends RSRpcServices implements public SetBalancerRunningResponse setBalancerRunning(RpcController c, SetBalancerRunningRequest req) throws ServiceException { try { - master.checkInitialized(); + server.checkInitialized(); boolean prevValue = (req.getSynchronous())? 
- synchronousBalanceSwitch(req.getOn()) : master.balanceSwitch(req.getOn()); + synchronousBalanceSwitch(req.getOn()) : server.balanceSwitch(req.getOn()); return SetBalancerRunningResponse.newBuilder().setPrevBalanceValue(prevValue).build(); } catch (IOException ioe) { throw new ServiceException(ioe); @@ -1639,9 +1678,9 @@ public class MasterRpcServices extends RSRpcServices implements @Override public ShutdownResponse shutdown(RpcController controller, ShutdownRequest request) throws ServiceException { - LOG.info(master.getClientIdAuditPrefix() + " shutdown"); + LOG.info(server.getClientIdAuditPrefix() + " shutdown"); try { - master.shutdown(); + server.shutdown(); } catch (IOException e) { LOG.error("Exception occurred in HMaster.shutdown()", e); throw new ServiceException(e); @@ -1657,18 +1696,18 @@ public class MasterRpcServices extends RSRpcServices implements public SnapshotResponse snapshot(RpcController controller, SnapshotRequest request) throws ServiceException { try { - master.checkInitialized(); - master.snapshotManager.checkSnapshotSupport(); + server.checkInitialized(); + server.snapshotManager.checkSnapshotSupport(); - LOG.info(master.getClientIdAuditPrefix() + " snapshot request for:" + + LOG.info(server.getClientIdAuditPrefix() + " snapshot request for:" + ClientSnapshotDescriptionUtils.toString(request.getSnapshot())); // get the snapshot information SnapshotDescription snapshot = SnapshotDescriptionUtils.validate( - request.getSnapshot(), master.getConfiguration()); - master.snapshotManager.takeSnapshot(snapshot); + request.getSnapshot(), server.getConfiguration()); + server.snapshotManager.takeSnapshot(snapshot); // send back the max amount of time the client should wait for the snapshot to complete - long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(master.getConfiguration(), + long waitTime = SnapshotDescriptionUtils.getMaxMasterTimeout(server.getConfiguration(), snapshot.getType(), SnapshotDescriptionUtils.DEFAULT_MAX_WAIT_TIME); return SnapshotResponse.newBuilder().setExpectedTimeout(waitTime).build(); } catch (ForeignException e) { @@ -1681,9 +1720,9 @@ public class MasterRpcServices extends RSRpcServices implements @Override public StopMasterResponse stopMaster(RpcController controller, StopMasterRequest request) throws ServiceException { - LOG.info(master.getClientIdAuditPrefix() + " stop"); + LOG.info(server.getClientIdAuditPrefix() + " stop"); try { - master.stopMaster(); + server.stopMaster(); } catch (IOException e) { LOG.error("Exception occurred while stopping master", e); throw new ServiceException(e); @@ -1696,7 +1735,7 @@ public class MasterRpcServices extends RSRpcServices implements final RpcController controller, final IsInMaintenanceModeRequest request) throws ServiceException { IsInMaintenanceModeResponse.Builder response = IsInMaintenanceModeResponse.newBuilder(); - response.setInMaintenanceMode(master.isInMaintenanceMode()); + response.setInMaintenanceMode(server.isInMaintenanceMode()); return response.build(); } @@ -1708,30 +1747,30 @@ public class MasterRpcServices extends RSRpcServices implements RegionSpecifierType type = req.getRegion().getType(); UnassignRegionResponse urr = UnassignRegionResponse.newBuilder().build(); - master.checkInitialized(); + server.checkInitialized(); if (type != RegionSpecifierType.REGION_NAME) { LOG.warn("unassignRegion specifier type: expected: " + RegionSpecifierType.REGION_NAME + " actual: " + type); } Pair pair = - MetaTableAccessor.getRegion(master.getConnection(), regionName); + 
MetaTableAccessor.getRegion(server.getConnection(), regionName); if (Bytes.equals(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName(), regionName)) { pair = new Pair<>(RegionInfoBuilder.FIRST_META_REGIONINFO, - MetaTableLocator.getMetaRegionLocation(master.getZooKeeper())); + MetaTableLocator.getMetaRegionLocation(server.getZooKeeper())); } if (pair == null) { throw new UnknownRegionException(Bytes.toString(regionName)); } RegionInfo hri = pair.getFirst(); - if (master.cpHost != null) { - master.cpHost.preUnassign(hri); + if (server.cpHost != null) { + server.cpHost.preUnassign(hri); } - LOG.debug(master.getClientIdAuditPrefix() + " unassign " + hri.getRegionNameAsString() + LOG.debug(server.getClientIdAuditPrefix() + " unassign " + hri.getRegionNameAsString() + " in current location if it is online"); - master.getAssignmentManager().unassign(hri); - if (master.cpHost != null) { - master.cpHost.postUnassign(hri); + server.getAssignmentManager().unassign(hri); + if (server.cpHost != null) { + server.cpHost.postUnassign(hri); } return urr; @@ -1744,8 +1783,8 @@ public class MasterRpcServices extends RSRpcServices implements public ReportRegionStateTransitionResponse reportRegionStateTransition(RpcController c, ReportRegionStateTransitionRequest req) throws ServiceException { try { - master.checkServiceStarted(); - return master.getAssignmentManager().reportRegionStateTransition(req); + server.checkServiceStarted(); + return server.getAssignmentManager().reportRegionStateTransition(req); } catch (IOException ioe) { throw new ServiceException(ioe); } @@ -1755,8 +1794,8 @@ public class MasterRpcServices extends RSRpcServices implements public SetQuotaResponse setQuota(RpcController c, SetQuotaRequest req) throws ServiceException { try { - master.checkInitialized(); - return master.getMasterQuotaManager().setQuota(req); + server.checkInitialized(); + return server.getMasterQuotaManager().setQuota(req); } catch (Exception e) { throw new ServiceException(e); } @@ -1768,8 +1807,8 @@ public class MasterRpcServices extends RSRpcServices implements MajorCompactionTimestampResponse.Builder response = MajorCompactionTimestampResponse.newBuilder(); try { - master.checkInitialized(); - response.setCompactionTimestamp(master.getLastMajorCompactionTimestamp(ProtobufUtil + server.checkInitialized(); + response.setCompactionTimestamp(server.getLastMajorCompactionTimestamp(ProtobufUtil .toTableName(request.getTableName()))); } catch (IOException e) { throw new ServiceException(e); @@ -1784,8 +1823,8 @@ public class MasterRpcServices extends RSRpcServices implements MajorCompactionTimestampResponse.Builder response = MajorCompactionTimestampResponse.newBuilder(); try { - master.checkInitialized(); - response.setCompactionTimestamp(master.getLastMajorCompactionTimestampForRegion(request + server.checkInitialized(); + response.setCompactionTimestamp(server.getLastMajorCompactionTimestampForRegion(request .getRegion().getValue().toByteArray())); } catch (IOException e) { throw new ServiceException(e); @@ -1793,98 +1832,11 @@ public class MasterRpcServices extends RSRpcServices implements return response.build(); } - /** - * Compact a region on the master. 
- * - * @param controller the RPC controller - * @param request the request - * @throws ServiceException - */ - @Override - @QosPriority(priority=HConstants.ADMIN_QOS) - public CompactRegionResponse compactRegion(final RpcController controller, - final CompactRegionRequest request) throws ServiceException { - try { - master.checkInitialized(); - byte[] regionName = request.getRegion().getValue().toByteArray(); - TableName tableName = RegionInfo.getTable(regionName); - // TODO: support CompactType.MOB - // if the region is a mob region, do the mob file compaction. - if (MobUtils.isMobRegionName(tableName, regionName)) { - checkHFileFormatVersionForMob(); - //TODO: support CompactType.MOB - // HBASE-23571 - LOG.warn("CompactType.MOB is not supported yet, will run regular compaction."+ - " Refer to HBASE-23571."); - return super.compactRegion(controller, request); - } else { - return super.compactRegion(controller, request); - } - } catch (IOException ie) { - throw new ServiceException(ie); - } - } - - /** - * check configured hfile format version before to do compaction - * @throws IOException throw IOException - */ - private void checkHFileFormatVersionForMob() throws IOException { - if (HFile.getFormatVersion(master.getConfiguration()) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) { - LOG.error("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS - + " is required for MOB compaction. Compaction will not run."); - throw new IOException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS - + " is required for MOB feature. Consider setting " + HFile.FORMAT_VERSION_KEY - + " accordingly."); - } - } - - /** - * This method implements Admin getRegionInfo. On RegionServer, it is - * able to return RegionInfo and detail. On Master, it just returns - * RegionInfo. On Master it has been hijacked to return Mob detail. - * Master implementation is good for querying full region name if - * you only have the encoded name (useful around region replicas - * for example which do not have a row in hbase:meta). - */ - @Override - @QosPriority(priority=HConstants.ADMIN_QOS) - public GetRegionInfoResponse getRegionInfo(final RpcController controller, - final GetRegionInfoRequest request) throws ServiceException { - RegionInfo ri = null; - try { - ri = getRegionInfo(request.getRegion()); - } catch(UnknownRegionException ure) { - throw new ServiceException(ure); - } - GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder(); - if (ri != null) { - builder.setRegionInfo(ProtobufUtil.toRegionInfo(ri)); - } else { - // Is it a MOB name? These work differently. - byte [] regionName = request.getRegion().getValue().toByteArray(); - TableName tableName = RegionInfo.getTable(regionName); - if (MobUtils.isMobRegionName(tableName, regionName)) { - // a dummy region info contains the compaction state. - RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tableName); - builder.setRegionInfo(ProtobufUtil.toRegionInfo(mobRegionInfo)); - if (request.hasCompactionState() && request.getCompactionState()) { - builder.setCompactionState(master.getMobCompactionState(tableName)); - } - } else { - // If unknown RegionInfo and not a MOB region, it is unknown. 
- throw new ServiceException(new UnknownRegionException(Bytes.toString(regionName))); - } - } - return builder.build(); - } - - @Override public IsBalancerEnabledResponse isBalancerEnabled(RpcController controller, IsBalancerEnabledRequest request) throws ServiceException { IsBalancerEnabledResponse.Builder response = IsBalancerEnabledResponse.newBuilder(); - response.setEnabled(master.isBalancerOn()); + response.setEnabled(server.isBalancerOn()); return response.build(); } @@ -1893,18 +1845,18 @@ public class MasterRpcServices extends RSRpcServices implements SetSplitOrMergeEnabledRequest request) throws ServiceException { SetSplitOrMergeEnabledResponse.Builder response = SetSplitOrMergeEnabledResponse.newBuilder(); try { - master.checkInitialized(); + server.checkInitialized(); boolean newValue = request.getEnabled(); for (MasterProtos.MasterSwitchType masterSwitchType: request.getSwitchTypesList()) { MasterSwitchType switchType = convert(masterSwitchType); - boolean oldValue = master.isSplitOrMergeEnabled(switchType); + boolean oldValue = server.isSplitOrMergeEnabled(switchType); response.addPrevValue(oldValue); - if (master.cpHost != null) { - master.cpHost.preSetSplitOrMergeEnabled(newValue, switchType); + if (server.cpHost != null) { + server.cpHost.preSetSplitOrMergeEnabled(newValue, switchType); } - master.getSplitOrMergeTracker().setSplitOrMergeEnabled(newValue, switchType); - if (master.cpHost != null) { - master.cpHost.postSetSplitOrMergeEnabled(newValue, switchType); + server.getSplitOrMergeTracker().setSplitOrMergeEnabled(newValue, switchType); + if (server.cpHost != null) { + server.cpHost.postSetSplitOrMergeEnabled(newValue, switchType); } } } catch (IOException | KeeperException e) { @@ -1917,7 +1869,7 @@ public class MasterRpcServices extends RSRpcServices implements public IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled(RpcController controller, IsSplitOrMergeEnabledRequest request) throws ServiceException { IsSplitOrMergeEnabledResponse.Builder response = IsSplitOrMergeEnabledResponse.newBuilder(); - response.setEnabled(master.isSplitOrMergeEnabled(convert(request.getSwitchType()))); + response.setEnabled(server.isSplitOrMergeEnabled(convert(request.getSwitchType()))); return response.build(); } @@ -1933,7 +1885,7 @@ public class MasterRpcServices extends RSRpcServices implements .build(); return NormalizeResponse.newBuilder() // all API requests are considered priority requests. - .setNormalizerRan(master.normalizeRegions(ntfp, true)) + .setNormalizerRan(server.normalizeRegions(ntfp, true)) .build(); } catch (IOException ex) { throw new ServiceException(ex); @@ -1957,10 +1909,10 @@ public class MasterRpcServices extends RSRpcServices implements // subsequent `createAndWatch`, with another client creating said node. // That said, there's supposed to be only one active master and thus there's supposed to be // only one process with the authority to modify the value. 
- final boolean prevValue = master.getRegionNormalizerManager().isNormalizerOn(); + final boolean prevValue = server.getRegionNormalizerManager().isNormalizerOn(); final boolean newValue = request.getOn(); - master.getRegionNormalizerManager().setNormalizerOn(newValue); - LOG.info("{} set normalizerSwitch={}", master.getClientIdAuditPrefix(), newValue); + server.getRegionNormalizerManager().setNormalizerOn(newValue); + LOG.info("{} set normalizerSwitch={}", server.getClientIdAuditPrefix(), newValue); return SetNormalizerRunningResponse.newBuilder().setPrevNormalizerValue(prevValue).build(); } @@ -1968,7 +1920,7 @@ public class MasterRpcServices extends RSRpcServices implements public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller, IsNormalizerEnabledRequest request) { IsNormalizerEnabledResponse.Builder response = IsNormalizerEnabledResponse.newBuilder(); - response.setEnabled(master.isNormalizerOn()); + response.setEnabled(server.isNormalizerOn()); return response.build(); } @@ -1980,27 +1932,27 @@ public class MasterRpcServices extends RSRpcServices implements SecurityCapabilitiesRequest request) throws ServiceException { SecurityCapabilitiesResponse.Builder response = SecurityCapabilitiesResponse.newBuilder(); try { - master.checkInitialized(); + server.checkInitialized(); Set capabilities = new HashSet<>(); // Authentication - if (User.isHBaseSecurityEnabled(master.getConfiguration())) { + if (User.isHBaseSecurityEnabled(server.getConfiguration())) { capabilities.add(SecurityCapabilitiesResponse.Capability.SECURE_AUTHENTICATION); } else { capabilities.add(SecurityCapabilitiesResponse.Capability.SIMPLE_AUTHENTICATION); } // A coprocessor that implements AccessControlService can provide AUTHORIZATION and // CELL_AUTHORIZATION - if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) { - if (AccessChecker.isAuthorizationSupported(master.getConfiguration())) { + if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) { + if (AccessChecker.isAuthorizationSupported(server.getConfiguration())) { capabilities.add(SecurityCapabilitiesResponse.Capability.AUTHORIZATION); } - if (AccessController.isCellAuthorizationSupported(master.getConfiguration())) { + if (AccessController.isCellAuthorizationSupported(server.getConfiguration())) { capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_AUTHORIZATION); } } // A coprocessor that implements VisibilityLabelsService can provide CELL_VISIBILITY. 
- if (master.cpHost != null && hasVisibilityLabelsServiceCoprocessor(master.cpHost)) { - if (VisibilityController.isCellAuthorizationSupported(master.getConfiguration())) { + if (server.cpHost != null && hasVisibilityLabelsServiceCoprocessor(server.cpHost)) { + if (VisibilityController.isCellAuthorizationSupported(server.getConfiguration())) { capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_VISIBILITY); } } @@ -2062,7 +2014,7 @@ public class MasterRpcServices extends RSRpcServices implements public AddReplicationPeerResponse addReplicationPeer(RpcController controller, AddReplicationPeerRequest request) throws ServiceException { try { - long procId = master.addReplicationPeer(request.getPeerId(), + long procId = server.addReplicationPeer(request.getPeerId(), ReplicationPeerConfigUtil.convert(request.getPeerConfig()), request.getPeerState().getState().equals(ReplicationState.State.ENABLED)); return AddReplicationPeerResponse.newBuilder().setProcId(procId).build(); @@ -2075,7 +2027,7 @@ public class MasterRpcServices extends RSRpcServices implements public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller, RemoveReplicationPeerRequest request) throws ServiceException { try { - long procId = master.removeReplicationPeer(request.getPeerId()); + long procId = server.removeReplicationPeer(request.getPeerId()); return RemoveReplicationPeerResponse.newBuilder().setProcId(procId).build(); } catch (ReplicationException | IOException e) { throw new ServiceException(e); @@ -2086,7 +2038,7 @@ public class MasterRpcServices extends RSRpcServices implements public EnableReplicationPeerResponse enableReplicationPeer(RpcController controller, EnableReplicationPeerRequest request) throws ServiceException { try { - long procId = master.enableReplicationPeer(request.getPeerId()); + long procId = server.enableReplicationPeer(request.getPeerId()); return EnableReplicationPeerResponse.newBuilder().setProcId(procId).build(); } catch (ReplicationException | IOException e) { throw new ServiceException(e); @@ -2097,7 +2049,7 @@ public class MasterRpcServices extends RSRpcServices implements public DisableReplicationPeerResponse disableReplicationPeer(RpcController controller, DisableReplicationPeerRequest request) throws ServiceException { try { - long procId = master.disableReplicationPeer(request.getPeerId()); + long procId = server.disableReplicationPeer(request.getPeerId()); return DisableReplicationPeerResponse.newBuilder().setProcId(procId).build(); } catch (ReplicationException | IOException e) { throw new ServiceException(e); @@ -2111,7 +2063,7 @@ public class MasterRpcServices extends RSRpcServices implements .newBuilder(); try { String peerId = request.getPeerId(); - ReplicationPeerConfig peerConfig = master.getReplicationPeerConfig(peerId); + ReplicationPeerConfig peerConfig = server.getReplicationPeerConfig(peerId); response.setPeerId(peerId); response.setPeerConfig(ReplicationPeerConfigUtil.convert(peerConfig)); } catch (ReplicationException | IOException e) { @@ -2124,7 +2076,7 @@ public class MasterRpcServices extends RSRpcServices implements public UpdateReplicationPeerConfigResponse updateReplicationPeerConfig(RpcController controller, UpdateReplicationPeerConfigRequest request) throws ServiceException { try { - long procId = master.updateReplicationPeerConfig(request.getPeerId(), + long procId = server.updateReplicationPeerConfig(request.getPeerId(), ReplicationPeerConfigUtil.convert(request.getPeerConfig())); return 
UpdateReplicationPeerConfigResponse.newBuilder().setProcId(procId).build(); } catch (ReplicationException | IOException e) { @@ -2137,7 +2089,7 @@ public class MasterRpcServices extends RSRpcServices implements transitReplicationPeerSyncReplicationState(RpcController controller, TransitReplicationPeerSyncReplicationStateRequest request) throws ServiceException { try { - long procId = master.transitReplicationPeerSyncReplicationState(request.getPeerId(), + long procId = server.transitReplicationPeerSyncReplicationState(request.getPeerId(), ReplicationPeerConfigUtil.toSyncReplicationState(request.getSyncReplicationState())); return TransitReplicationPeerSyncReplicationStateResponse.newBuilder().setProcId(procId) .build(); @@ -2151,7 +2103,7 @@ public class MasterRpcServices extends RSRpcServices implements ListReplicationPeersRequest request) throws ServiceException { ListReplicationPeersResponse.Builder response = ListReplicationPeersResponse.newBuilder(); try { - List peers = master + List peers = server .listReplicationPeers(request.hasRegex() ? request.getRegex() : null); for (ReplicationPeerDescription peer : peers) { response.addPeerDesc(ReplicationPeerConfigUtil.toProtoReplicationPeerDescription(peer)); @@ -2169,15 +2121,15 @@ public class MasterRpcServices extends RSRpcServices implements ListDecommissionedRegionServersResponse.Builder response = ListDecommissionedRegionServersResponse.newBuilder(); try { - master.checkInitialized(); - if (master.cpHost != null) { - master.cpHost.preListDecommissionedRegionServers(); + server.checkInitialized(); + if (server.cpHost != null) { + server.cpHost.preListDecommissionedRegionServers(); } - List servers = master.listDecommissionedRegionServers(); + List servers = server.listDecommissionedRegionServers(); response.addAllServerName((servers.stream().map(server -> ProtobufUtil.toServerName(server))) .collect(Collectors.toList())); - if (master.cpHost != null) { - master.cpHost.postListDecommissionedRegionServers(); + if (server.cpHost != null) { + server.cpHost.postListDecommissionedRegionServers(); } } catch (IOException io) { throw new ServiceException(io); @@ -2190,16 +2142,16 @@ public class MasterRpcServices extends RSRpcServices implements public DecommissionRegionServersResponse decommissionRegionServers(RpcController controller, DecommissionRegionServersRequest request) throws ServiceException { try { - master.checkInitialized(); + server.checkInitialized(); List servers = request.getServerNameList().stream() .map(pbServer -> ProtobufUtil.toServerName(pbServer)).collect(Collectors.toList()); boolean offload = request.getOffload(); - if (master.cpHost != null) { - master.cpHost.preDecommissionRegionServers(servers, offload); + if (server.cpHost != null) { + server.cpHost.preDecommissionRegionServers(servers, offload); } - master.decommissionRegionServers(servers, offload); - if (master.cpHost != null) { - master.cpHost.postDecommissionRegionServers(servers, offload); + server.decommissionRegionServers(servers, offload); + if (server.cpHost != null) { + server.cpHost.postDecommissionRegionServers(servers, offload); } } catch (IOException io) { throw new ServiceException(io); @@ -2212,17 +2164,17 @@ public class MasterRpcServices extends RSRpcServices implements public RecommissionRegionServerResponse recommissionRegionServer(RpcController controller, RecommissionRegionServerRequest request) throws ServiceException { try { - master.checkInitialized(); - ServerName server = ProtobufUtil.toServerName(request.getServerName()); + 
server.checkInitialized(); + ServerName sn = ProtobufUtil.toServerName(request.getServerName()); List encodedRegionNames = request.getRegionList().stream() .map(regionSpecifier -> regionSpecifier.getValue().toByteArray()) .collect(Collectors.toList()); - if (master.cpHost != null) { - master.cpHost.preRecommissionRegionServer(server, encodedRegionNames); + if (server.cpHost != null) { + server.cpHost.preRecommissionRegionServer(sn, encodedRegionNames); } - master.recommissionRegionServer(server, encodedRegionNames); - if (master.cpHost != null) { - master.cpHost.postRecommissionRegionServer(server, encodedRegionNames); + server.recommissionRegionServer(sn, encodedRegionNames); + if (server.cpHost != null) { + server.cpHost.postRecommissionRegionServer(sn, encodedRegionNames); } } catch (IOException io) { throw new ServiceException(io); @@ -2245,10 +2197,10 @@ public class MasterRpcServices extends RSRpcServices implements for (int i = 0; i < request.getRegionInfoCount(); ++i) { regionInfos[i] = ProtobufUtil.toRegionInfo(request.getRegionInfo(i)); } - npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) { + npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) { @Override protected void run() throws IOException { - setProcId(master.getLockManager().remoteLocks().requestRegionsLock(regionInfos, + setProcId(server.getLockManager().remoteLocks().requestRegionsLock(regionInfos, request.getDescription(), getNonceKey())); } @@ -2259,10 +2211,10 @@ public class MasterRpcServices extends RSRpcServices implements }; } else if (request.hasTableName()) { final TableName tableName = ProtobufUtil.toTableName(request.getTableName()); - npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) { + npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) { @Override protected void run() throws IOException { - setProcId(master.getLockManager().remoteLocks().requestTableLock(tableName, type, + setProcId(server.getLockManager().remoteLocks().requestTableLock(tableName, type, request.getDescription(), getNonceKey())); } @@ -2272,10 +2224,10 @@ public class MasterRpcServices extends RSRpcServices implements } }; } else if (request.hasNamespace()) { - npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) { + npr = new NonceProcedureRunnable(server, request.getNonceGroup(), request.getNonce()) { @Override protected void run() throws IOException { - setProcId(master.getLockManager().remoteLocks().requestNamespaceLock( + setProcId(server.getLockManager().remoteLocks().requestNamespaceLock( request.getNamespace(), type, request.getDescription(), getNonceKey())); } @@ -2306,10 +2258,10 @@ public class MasterRpcServices extends RSRpcServices implements public LockHeartbeatResponse lockHeartbeat(RpcController controller, LockHeartbeatRequest request) throws ServiceException { try { - if (master.getLockManager().remoteLocks().lockHeartbeat(request.getProcId(), + if (server.getLockManager().remoteLocks().lockHeartbeat(request.getProcId(), request.getKeepAlive())) { return LockHeartbeatResponse.newBuilder().setTimeoutMs( - master.getConfiguration().getInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF, + server.getConfiguration().getInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF, LockProcedure.DEFAULT_REMOTE_LOCKS_TIMEOUT_MS)) .setLockStatus(LockHeartbeatResponse.LockStatus.LOCKED).build(); } else { @@ -2325,11 +2277,11 @@ public class MasterRpcServices extends RSRpcServices 
implements public RegionSpaceUseReportResponse reportRegionSpaceUse(RpcController controller, RegionSpaceUseReportRequest request) throws ServiceException { try { - master.checkInitialized(); - if (!QuotaUtil.isQuotaEnabled(master.getConfiguration())) { + server.checkInitialized(); + if (!QuotaUtil.isQuotaEnabled(server.getConfiguration())) { return RegionSpaceUseReportResponse.newBuilder().build(); } - MasterQuotaManager quotaManager = this.master.getMasterQuotaManager(); + MasterQuotaManager quotaManager = this.server.getMasterQuotaManager(); if (quotaManager != null) { final long now = EnvironmentEdgeManager.currentTime(); for (RegionSpaceUse report : request.getSpaceUseList()) { @@ -2350,8 +2302,8 @@ public class MasterRpcServices extends RSRpcServices implements public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes( RpcController controller, GetSpaceQuotaRegionSizesRequest request) throws ServiceException { try { - master.checkInitialized(); - MasterQuotaManager quotaManager = this.master.getMasterQuotaManager(); + server.checkInitialized(); + MasterQuotaManager quotaManager = this.server.getMasterQuotaManager(); GetSpaceQuotaRegionSizesResponse.Builder builder = GetSpaceQuotaRegionSizesResponse.newBuilder(); if (quotaManager != null) { @@ -2387,8 +2339,8 @@ public class MasterRpcServices extends RSRpcServices implements public GetQuotaStatesResponse getQuotaStates( RpcController controller, GetQuotaStatesRequest request) throws ServiceException { try { - master.checkInitialized(); - QuotaObserverChore quotaChore = this.master.getQuotaObserverChore(); + server.checkInitialized(); + QuotaObserverChore quotaChore = this.server.getQuotaObserverChore(); GetQuotaStatesResponse.Builder builder = GetQuotaStatesResponse.newBuilder(); if (quotaChore != null) { // The "current" view of all tables with quotas @@ -2418,43 +2370,42 @@ public class MasterRpcServices extends RSRpcServices implements @Override public ClearDeadServersResponse clearDeadServers(RpcController controller, ClearDeadServersRequest request) throws ServiceException { - LOG.debug(master.getClientIdAuditPrefix() + " clear dead region servers."); + LOG.debug(server.getClientIdAuditPrefix() + " clear dead region servers."); ClearDeadServersResponse.Builder response = ClearDeadServersResponse.newBuilder(); try { - master.checkInitialized(); - if (master.cpHost != null) { - master.cpHost.preClearDeadServers(); + server.checkInitialized(); + if (server.cpHost != null) { + server.cpHost.preClearDeadServers(); } - if (master.getServerManager().areDeadServersInProgress()) { + if (server.getServerManager().areDeadServersInProgress()) { LOG.debug("Some dead server is still under processing, won't clear the dead server list"); response.addAllServerName(request.getServerNameList()); } else { - DeadServer deadServer = master.getServerManager().getDeadServers(); + DeadServer deadServer = server.getServerManager().getDeadServers(); Set
clearedServers = new HashSet<>(); for (HBaseProtos.ServerName pbServer : request.getServerNameList()) { - ServerName server = ProtobufUtil.toServerName(pbServer); - - final boolean deadInProcess = master.getProcedures().stream().anyMatch( + ServerName serverName = ProtobufUtil.toServerName(pbServer); + final boolean deadInProcess = server.getProcedures().stream().anyMatch( p -> (p instanceof ServerCrashProcedure) - && ((ServerCrashProcedure) p).getServerName().equals(server)); + && ((ServerCrashProcedure) p).getServerName().equals(serverName)); if (deadInProcess) { throw new ServiceException( - String.format("Dead server '%s' is not 'dead' in fact...", server)); + String.format("Dead server '%s' is not 'dead' in fact...", serverName)); } - if (!deadServer.removeDeadServer(server)) { + if (!deadServer.removeDeadServer(serverName)) { response.addServerName(pbServer); } else { - clearedServers.add(server.getAddress()); + clearedServers.add(serverName.getAddress()); } } - master.getRSGroupInfoManager().removeServers(clearedServers); + server.getRSGroupInfoManager().removeServers(clearedServers); LOG.info("Remove decommissioned servers {} from RSGroup done", clearedServers); } - if (master.cpHost != null) { - master.cpHost.postClearDeadServers( + if (server.cpHost != null) { + server.cpHost.postClearDeadServers( ProtobufUtil.toServerNameList(request.getServerNameList()), ProtobufUtil.toServerNameList(response.getServerNameList())); } @@ -2469,15 +2420,15 @@ public class MasterRpcServices extends RSRpcServices implements ReportProcedureDoneRequest request) throws ServiceException { // Check Masters is up and ready for duty before progressing. Remote side will keep trying. try { - this.master.checkServiceStarted(); + this.server.checkServiceStarted(); } catch (ServerNotRunningYetException snrye) { throw new ServiceException(snrye); } request.getResultList().forEach(result -> { if (result.getStatus() == RemoteProcedureResult.Status.SUCCESS) { - master.remoteProcedureCompleted(result.getProcId()); + server.remoteProcedureCompleted(result.getProcId()); } else { - master.remoteProcedureFailed(result.getProcId(), + server.remoteProcedureFailed(result.getProcId(), RemoteProcedureException.fromProto(result.getError())); } }); @@ -2488,12 +2439,12 @@ public class MasterRpcServices extends RSRpcServices implements public FileArchiveNotificationResponse reportFileArchival(RpcController controller, FileArchiveNotificationRequest request) throws ServiceException { try { - master.checkInitialized(); - if (!QuotaUtil.isQuotaEnabled(master.getConfiguration())) { + server.checkInitialized(); + if (!QuotaUtil.isQuotaEnabled(server.getConfiguration())) { return FileArchiveNotificationResponse.newBuilder().build(); } - master.getMasterQuotaManager().processFileArchivals(request, master.getConnection(), - master.getConfiguration(), master.getFileSystem()); + server.getMasterQuotaManager().processFileArchivals(request, server.getConnection(), + server.getConfiguration(), server.getFileSystem()); return FileArchiveNotificationResponse.newBuilder().build(); } catch (Exception e) { throw new ServiceException(e); @@ -2506,8 +2457,8 @@ public class MasterRpcServices extends RSRpcServices implements public RunHbckChoreResponse runHbckChore(RpcController c, RunHbckChoreRequest req) throws ServiceException { rpcPreCheck("runHbckChore"); - LOG.info("{} request HBCK chore to run", master.getClientIdAuditPrefix()); - HbckChore hbckChore = master.getHbckChore(); + LOG.info("{} request HBCK chore to run", 
server.getClientIdAuditPrefix()); + HbckChore hbckChore = server.getHbckChore(); boolean ran = hbckChore.runChore(); return RunHbckChoreResponse.newBuilder().setRan(ran).build(); } @@ -2524,11 +2475,11 @@ public class MasterRpcServices extends RSRpcServices implements rpcPreCheck("setTableStateInMeta"); TableName tn = ProtobufUtil.toTableName(request.getTableName()); try { - TableState prevState = this.master.getTableStateManager().getTableState(tn); + TableState prevState = this.server.getTableStateManager().getTableState(tn); TableState newState = TableState.convert(tn, request.getTableState()); - LOG.info("{} set table={} state from {} to {}", master.getClientIdAuditPrefix(), + LOG.info("{} set table={} state from {} to {}", server.getClientIdAuditPrefix(), tn, prevState.getState(), newState.getState()); - this.master.getTableStateManager().setTableState(tn, newState.getState()); + this.server.getTableStateManager().setTableState(tn, newState.getState()); return GetTableStateResponse.newBuilder().setTableState(prevState.convert()).build(); } catch (Exception e) { throw new ServiceException(e); @@ -2556,12 +2507,12 @@ public class MasterRpcServices extends RSRpcServices implements // TODO: actually, a full region name can save a lot on meta scan, improve later. encodedName = RegionInfo.encodeRegionName(spec.getValue().toByteArray()); } - RegionInfo info = this.master.getAssignmentManager().loadRegionFromMeta(encodedName); + RegionInfo info = this.server.getAssignmentManager().loadRegionFromMeta(encodedName); LOG.trace("region info loaded from meta table: {}", info); RegionState prevState = - this.master.getAssignmentManager().getRegionStates().getRegionState(info); + this.server.getAssignmentManager().getRegionStates().getRegionState(info); RegionState.State newState = RegionState.State.convert(s.getState()); - LOG.info("{} set region={} state from {} to {}", master.getClientIdAuditPrefix(), info, + LOG.info("{} set region={} state from {} to {}", server.getClientIdAuditPrefix(), info, prevState.getState(), newState); Put metaPut = MetaTableAccessor.makePutFromRegionInfo(info, EnvironmentEdgeManager.currentTime()); @@ -2569,9 +2520,9 @@ public class MasterRpcServices extends RSRpcServices implements Bytes.toBytes(newState.name())); List putList = new ArrayList<>(); putList.add(metaPut); - MetaTableAccessor.putsToMetaTable(this.master.getConnection(), putList); + MetaTableAccessor.putsToMetaTable(this.server.getConnection(), putList); // Loads from meta again to refresh AM cache with the new region state - this.master.getAssignmentManager().loadRegionFromMeta(encodedName); + this.server.getAssignmentManager().loadRegionFromMeta(encodedName); builder.addStates(RegionSpecifierAndState.newBuilder().setRegionSpecifier(spec) .setState(prevState.getState().convert())); } @@ -2590,14 +2541,14 @@ public class MasterRpcServices extends RSRpcServices implements switch(rs.getType()) { case REGION_NAME: final byte[] regionName = rs.getValue().toByteArray(); - ri = this.master.getAssignmentManager().getRegionInfo(regionName); + ri = this.server.getAssignmentManager().getRegionInfo(regionName); break; case ENCODED_REGION_NAME: String encodedRegionName = Bytes.toString(rs.getValue().toByteArray()); - RegionState regionState = this.master.getAssignmentManager().getRegionStates(). + RegionState regionState = this.server.getAssignmentManager().getRegionStates(). getRegionState(encodedRegionName); ri = regionState == null ? 
- this.master.getAssignmentManager().loadRegionFromMeta(encodedRegionName) : + this.server.getAssignmentManager().loadRegionFromMeta(encodedRegionName) : regionState.getRegion(); break; default: @@ -2610,7 +2561,7 @@ public class MasterRpcServices extends RSRpcServices implements * @throws ServiceException If no MasterProcedureExecutor */ private void checkMasterProcedureExecutor() throws ServiceException { - if (this.master.getMasterProcedureExecutor() == null) { + if (this.server.getMasterProcedureExecutor() == null) { throw new ServiceException("Master's ProcedureExecutor not initialized; retry later"); } } @@ -2628,16 +2579,16 @@ public class MasterRpcServices extends RSRpcServices implements MasterProtos.AssignsResponse.newBuilder(); try { boolean override = request.getOverride(); - LOG.info("{} assigns, override={}", master.getClientIdAuditPrefix(), override); + LOG.info("{} assigns, override={}", server.getClientIdAuditPrefix(), override); for (HBaseProtos.RegionSpecifier rs: request.getRegionList()) { long pid = Procedure.NO_PROC_ID; RegionInfo ri = getRegionInfo(rs); if (ri == null) { LOG.info("Unknown={}", rs); } else { - Procedure p = this.master.getAssignmentManager().createOneAssignProcedure(ri, override); + Procedure p = this.server.getAssignmentManager().createOneAssignProcedure(ri, override); if (p != null) { - pid = this.master.getMasterProcedureExecutor().submitProcedure(p); + pid = this.server.getMasterProcedureExecutor().submitProcedure(p); } } responseBuilder.addPid(pid); @@ -2661,16 +2612,16 @@ public class MasterRpcServices extends RSRpcServices implements MasterProtos.UnassignsResponse.newBuilder(); try { boolean override = request.getOverride(); - LOG.info("{} unassigns, override={}", master.getClientIdAuditPrefix(), override); + LOG.info("{} unassigns, override={}", server.getClientIdAuditPrefix(), override); for (HBaseProtos.RegionSpecifier rs: request.getRegionList()) { long pid = Procedure.NO_PROC_ID; RegionInfo ri = getRegionInfo(rs); if (ri == null) { LOG.info("Unknown={}", rs); } else { - Procedure p = this.master.getAssignmentManager().createOneUnassignProcedure(ri, override); + Procedure p = this.server.getAssignmentManager().createOneUnassignProcedure(ri, override); if (p != null) { - pid = this.master.getMasterProcedureExecutor().submitProcedure(p); + pid = this.server.getMasterProcedureExecutor().submitProcedure(p); } } responseBuilder.addPid(pid); @@ -2697,10 +2648,10 @@ public class MasterRpcServices extends RSRpcServices implements MasterProtos.BypassProcedureRequest request) throws ServiceException { try { LOG.info("{} bypass procedures={}, waitTime={}, override={}, recursive={}", - master.getClientIdAuditPrefix(), request.getProcIdList(), request.getWaitTime(), + server.getClientIdAuditPrefix(), request.getProcIdList(), request.getWaitTime(), request.getOverride(), request.getRecursive()); List ret = - master.getMasterProcedureExecutor().bypassProcedure(request.getProcIdList(), + server.getMasterProcedureExecutor().bypassProcedure(request.getProcIdList(), request.getWaitTime(), request.getOverride(), request.getRecursive()); return MasterProtos.BypassProcedureResponse.newBuilder().addAllBypassed(ret).build(); } catch (IOException e) { @@ -2716,9 +2667,9 @@ public class MasterRpcServices extends RSRpcServices implements for (HBaseProtos.ServerName sn: request.getServerNameList()) { ServerName serverName = ProtobufUtil.toServerName(sn); LOG.info("{} schedule ServerCrashProcedure for {}", - this.master.getClientIdAuditPrefix(), serverName); + 
this.server.getClientIdAuditPrefix(), serverName); if (shouldSubmitSCP(serverName)) { - pids.add(this.master.getServerManager().expireServer(serverName, true)); + pids.add(this.server.getServerManager().expireServer(serverName, true)); } else { pids.add(Procedure.NO_PROC_ID); } @@ -2730,20 +2681,19 @@ public class MasterRpcServices extends RSRpcServices implements public MasterProtos.ScheduleSCPsForUnknownServersResponse scheduleSCPsForUnknownServers( RpcController controller, MasterProtos.ScheduleSCPsForUnknownServersRequest request) throws ServiceException { - List pids = new ArrayList<>(); final Set serverNames = - master.getAssignmentManager().getRegionStates().getRegionStates().stream() + server.getAssignmentManager().getRegionStates().getRegionStates().stream() .map(RegionState::getServerName).collect(Collectors.toSet()); final Set unknownServerNames = serverNames.stream() - .filter(sn -> master.getServerManager().isServerUnknown(sn)).collect(Collectors.toSet()); + .filter(sn -> server.getServerManager().isServerUnknown(sn)).collect(Collectors.toSet()); for (ServerName sn: unknownServerNames) { LOG.info("{} schedule ServerCrashProcedure for unknown {}", - this.master.getClientIdAuditPrefix(), sn); + this.server.getClientIdAuditPrefix(), sn); if (shouldSubmitSCP(sn)) { - pids.add(this.master.getServerManager().expireServer(sn, true)); + pids.add(this.server.getServerManager().expireServer(sn, true)); } else { pids.add(Procedure.NO_PROC_ID); } @@ -2756,7 +2706,7 @@ public class MasterRpcServices extends RSRpcServices implements throws ServiceException { rpcPreCheck("fixMeta"); try { - MetaFixer mf = new MetaFixer(this.master); + MetaFixer mf = new MetaFixer(this.server); mf.fix(); return FixMetaResponse.newBuilder().build(); } catch (IOException ioe) { @@ -2768,8 +2718,8 @@ public class MasterRpcServices extends RSRpcServices implements public SwitchRpcThrottleResponse switchRpcThrottle(RpcController controller, SwitchRpcThrottleRequest request) throws ServiceException { try { - master.checkInitialized(); - return master.getMasterQuotaManager().switchRpcThrottle(request); + server.checkInitialized(); + return server.getMasterQuotaManager().switchRpcThrottle(request); } catch (Exception e) { throw new ServiceException(e); } @@ -2779,8 +2729,8 @@ public class MasterRpcServices extends RSRpcServices implements public MasterProtos.IsRpcThrottleEnabledResponse isRpcThrottleEnabled(RpcController controller, MasterProtos.IsRpcThrottleEnabledRequest request) throws ServiceException { try { - master.checkInitialized(); - return master.getMasterQuotaManager().isRpcThrottleEnabled(request); + server.checkInitialized(); + return server.getMasterQuotaManager().isRpcThrottleEnabled(request); } catch (Exception e) { throw new ServiceException(e); } @@ -2790,8 +2740,8 @@ public class MasterRpcServices extends RSRpcServices implements public SwitchExceedThrottleQuotaResponse switchExceedThrottleQuota(RpcController controller, SwitchExceedThrottleQuotaRequest request) throws ServiceException { try { - master.checkInitialized(); - return master.getMasterQuotaManager().switchExceedThrottleQuota(request); + server.checkInitialized(); + return server.getMasterQuotaManager().switchExceedThrottleQuota(request); } catch (Exception e) { throw new ServiceException(e); } @@ -2801,17 +2751,17 @@ public class MasterRpcServices extends RSRpcServices implements public GrantResponse grant(RpcController controller, GrantRequest request) throws ServiceException { try { - master.checkInitialized(); - if (master.cpHost != 
null && hasAccessControlServiceCoprocessor(master.cpHost)) { + server.checkInitialized(); + if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) { final UserPermission perm = ShadedAccessControlUtil.toUserPermission(request.getUserPermission()); boolean mergeExistingPermissions = request.getMergeExistingPermissions(); - master.cpHost.preGrant(perm, mergeExistingPermissions); - try (Table table = master.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + server.cpHost.preGrant(perm, mergeExistingPermissions); + try (Table table = server.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { PermissionStorage.addUserPermission(getConfiguration(), perm, table, mergeExistingPermissions); } - master.cpHost.postGrant(perm, mergeExistingPermissions); + server.cpHost.postGrant(perm, mergeExistingPermissions); User caller = RpcServer.getRequestUser().orElse(null); if (AUDITLOG.isTraceEnabled()) { // audit log should store permission changes in addition to auth results @@ -2833,15 +2783,15 @@ public class MasterRpcServices extends RSRpcServices implements public RevokeResponse revoke(RpcController controller, RevokeRequest request) throws ServiceException { try { - master.checkInitialized(); - if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) { + server.checkInitialized(); + if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) { final UserPermission userPermission = ShadedAccessControlUtil.toUserPermission(request.getUserPermission()); - master.cpHost.preRevoke(userPermission); - try (Table table = master.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { - PermissionStorage.removeUserPermission(master.getConfiguration(), userPermission, table); + server.cpHost.preRevoke(userPermission); + try (Table table = server.getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) { + PermissionStorage.removeUserPermission(server.getConfiguration(), userPermission, table); } - master.cpHost.postRevoke(userPermission); + server.cpHost.postRevoke(userPermission); User caller = RpcServer.getRequestUser().orElse(null); if (AUDITLOG.isTraceEnabled()) { // audit log should record all permission changes @@ -2863,8 +2813,8 @@ public class MasterRpcServices extends RSRpcServices implements public GetUserPermissionsResponse getUserPermissions(RpcController controller, GetUserPermissionsRequest request) throws ServiceException { try { - master.checkInitialized(); - if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) { + server.checkInitialized(); + if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) { final String userName = request.hasUserName() ? request.getUserName().toStringUtf8() : null; String namespace = request.hasNamespaceName() ? request.getNamespaceName().toStringUtf8() : null; @@ -2874,18 +2824,18 @@ public class MasterRpcServices extends RSRpcServices implements byte[] cq = request.hasColumnQualifier() ? request.getColumnQualifier().toByteArray() : null; Type permissionType = request.hasType() ? request.getType() : null; - master.getMasterCoprocessorHost().preGetUserPermissions(userName, namespace, table, cf, cq); + server.getMasterCoprocessorHost().preGetUserPermissions(userName, namespace, table, cf, cq); List perms = null; if (permissionType == Type.Table) { boolean filter = (cf != null || userName != null) ? 
true : false; - perms = PermissionStorage.getUserTablePermissions(master.getConfiguration(), table, cf, + perms = PermissionStorage.getUserTablePermissions(server.getConfiguration(), table, cf, cq, userName, filter); } else if (permissionType == Type.Namespace) { - perms = PermissionStorage.getUserNamespacePermissions(master.getConfiguration(), + perms = PermissionStorage.getUserNamespacePermissions(server.getConfiguration(), namespace, userName, userName != null ? true : false); } else { - perms = PermissionStorage.getUserPermissions(master.getConfiguration(), null, null, null, + perms = PermissionStorage.getUserPermissions(server.getConfiguration(), null, null, null, userName, userName != null ? true : false); // Skip super users when filter user is specified if (userName == null) { @@ -2899,7 +2849,7 @@ public class MasterRpcServices extends RSRpcServices implements } } - master.getMasterCoprocessorHost().postGetUserPermissions(userName, namespace, table, cf, + server.getMasterCoprocessorHost().postGetUserPermissions(userName, namespace, table, cf, cq); AccessControlProtos.GetUserPermissionsResponse response = ShadedAccessControlUtil.buildGetUserPermissionsResponse(perms); @@ -2917,8 +2867,8 @@ public class MasterRpcServices extends RSRpcServices implements public HasUserPermissionsResponse hasUserPermissions(RpcController controller, HasUserPermissionsRequest request) throws ServiceException { try { - master.checkInitialized(); - if (master.cpHost != null && hasAccessControlServiceCoprocessor(master.cpHost)) { + server.checkInitialized(); + if (server.cpHost != null && hasAccessControlServiceCoprocessor(server.cpHost)) { User caller = RpcServer.getRequestUser().orElse(null); String userName = request.hasUserName() ? request.getUserName().toStringUtf8() : caller.getShortName(); @@ -2926,7 +2876,7 @@ public class MasterRpcServices extends RSRpcServices implements for (int i = 0; i < request.getPermissionCount(); i++) { permissions.add(ShadedAccessControlUtil.toPermission(request.getPermission(i))); } - master.getMasterCoprocessorHost().preHasUserPermissions(userName, permissions); + server.getMasterCoprocessorHost().preHasUserPermissions(userName, permissions); if (!caller.getShortName().equals(userName)) { List groups = AccessChecker.getUserGroups(userName); caller = new InputUser(userName, groups.toArray(new String[groups.size()])); @@ -2943,7 +2893,7 @@ public class MasterRpcServices extends RSRpcServices implements hasUserPermissions.add(true); } } - master.getMasterCoprocessorHost().postHasUserPermissions(userName, permissions); + server.getMasterCoprocessorHost().postHasUserPermissions(userName, permissions); HasUserPermissionsResponse.Builder builder = HasUserPermissionsResponse.newBuilder().addAllHasUserPermission(hasUserPermissions); return builder.build(); @@ -2956,25 +2906,10 @@ public class MasterRpcServices extends RSRpcServices implements } } - private boolean containMetaWals(ServerName serverName) throws IOException { - Path logDir = new Path(master.getWALRootDir(), - AbstractFSWALProvider.getWALDirectoryName(serverName.toString())); - Path splitDir = logDir.suffix(AbstractFSWALProvider.SPLITTING_EXT); - Path checkDir = master.getFileSystem().exists(splitDir) ? splitDir : logDir; - try { - return master.getFileSystem().listStatus(checkDir, META_FILTER).length > 0; - } catch (FileNotFoundException fnfe) { - // If no files, then we don't contain metas; was failing schedule of - // SCP because this was FNFE'ing when no server dirs ('Unknown Server'). 
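The containMetaWals helper being removed above (its tail continues on the following lines) listed a dead server's WAL directory and treated a missing directory as "no meta WALs" rather than an error, which is what the FileNotFoundException comment refers to. A small, hedged illustration of that listing pattern with the Hadoop FileSystem API is below; the class and method names are invented for the example and are not HBase code.

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.PathFilter;

    public final class MissingDirTolerantListing {
      private MissingDirTolerantListing() {
      }

      /** Returns true if dir has an entry matching filter; a missing dir counts as empty. */
      public static boolean hasMatchingFile(FileSystem fs, Path dir, PathFilter filter)
        throws IOException {
        try {
          return fs.listStatus(dir, filter).length > 0;
        } catch (FileNotFoundException fnfe) {
          // e.g. an 'Unknown Server' that never wrote WALs: no directory simply means no files.
          return false;
        }
      }
    }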
- LOG.warn("No dir for WALs for {}; continuing", serverName.toString()); - return false; - } - } - private boolean shouldSubmitSCP(ServerName serverName) { // check if there is already a SCP of this server running List> procedures = - master.getMasterProcedureExecutor().getProcedures(); + server.getMasterProcedureExecutor().getProcedures(); for (Procedure procedure : procedures) { if (procedure instanceof ServerCrashProcedure) { if (serverName.compareTo(((ServerCrashProcedure) procedure).getServerName()) == 0 @@ -2993,12 +2928,12 @@ public class MasterRpcServices extends RSRpcServices implements GetRSGroupInfoRequest request) throws ServiceException { String groupName = request.getRSGroupName(); LOG.info( - master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, group=" + groupName); + server.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, group=" + groupName); try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preGetRSGroupInfo(groupName); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().preGetRSGroupInfo(groupName); } - RSGroupInfo rsGroupInfo = master.getRSGroupInfoManager().getRSGroup(groupName); + RSGroupInfo rsGroupInfo = server.getRSGroupInfoManager().getRSGroup(groupName); GetRSGroupInfoResponse resp; if (rsGroupInfo != null) { resp = GetRSGroupInfoResponse.newBuilder() @@ -3006,8 +2941,8 @@ public class MasterRpcServices extends RSRpcServices implements } else { resp = GetRSGroupInfoResponse.getDefaultInstance(); } - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postGetRSGroupInfo(groupName); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().postGetRSGroupInfo(groupName); } return resp; } catch (IOException e) { @@ -3020,24 +2955,24 @@ public class MasterRpcServices extends RSRpcServices implements GetRSGroupInfoOfTableRequest request) throws ServiceException { TableName tableName = ProtobufUtil.toTableName(request.getTableName()); LOG.info( - master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table=" + tableName); + server.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table=" + tableName); try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preGetRSGroupInfoOfTable(tableName); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().preGetRSGroupInfoOfTable(tableName); } GetRSGroupInfoOfTableResponse resp; - TableDescriptor td = master.getTableDescriptors().get(tableName); + TableDescriptor td = server.getTableDescriptors().get(tableName); if (td == null) { resp = GetRSGroupInfoOfTableResponse.getDefaultInstance(); } else { RSGroupInfo rsGroupInfo = - RSGroupUtil.getRSGroupInfo(master, master.getRSGroupInfoManager(), tableName) - .orElse(master.getRSGroupInfoManager().getRSGroup(RSGroupInfo.DEFAULT_GROUP)); + RSGroupUtil.getRSGroupInfo(server, server.getRSGroupInfoManager(), tableName) + .orElse(server.getRSGroupInfoManager().getRSGroup(RSGroupInfo.DEFAULT_GROUP)); resp = GetRSGroupInfoOfTableResponse.newBuilder() .setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)).build(); } - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postGetRSGroupInfoOfTable(tableName); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().postGetRSGroupInfoOfTable(tableName); } return resp; } catch (IOException e) { @@ -3050,12 +2985,12 @@ 
public class MasterRpcServices extends RSRpcServices implements GetRSGroupInfoOfServerRequest request) throws ServiceException { Address hp = Address.fromParts(request.getServer().getHostName(), request.getServer().getPort()); - LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" + hp); + LOG.info(server.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" + hp); try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preGetRSGroupInfoOfServer(hp); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().preGetRSGroupInfoOfServer(hp); } - RSGroupInfo rsGroupInfo = master.getRSGroupInfoManager().getRSGroupOfServer(hp); + RSGroupInfo rsGroupInfo = server.getRSGroupInfoManager().getRSGroupOfServer(hp); GetRSGroupInfoOfServerResponse resp; if (rsGroupInfo != null) { resp = GetRSGroupInfoOfServerResponse.newBuilder() @@ -3063,8 +2998,8 @@ public class MasterRpcServices extends RSRpcServices implements } else { resp = GetRSGroupInfoOfServerResponse.getDefaultInstance(); } - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postGetRSGroupInfoOfServer(hp); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().postGetRSGroupInfoOfServer(hp); } return resp; } catch (IOException e) { @@ -3080,15 +3015,15 @@ public class MasterRpcServices extends RSRpcServices implements for (HBaseProtos.ServerName el : request.getServersList()) { hostPorts.add(Address.fromParts(el.getHostName(), el.getPort())); } - LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts + " to rsgroup " + + LOG.info(server.getClientIdAuditPrefix() + " move servers " + hostPorts + " to rsgroup " + request.getTargetGroup()); try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup()); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup()); } - master.getRSGroupInfoManager().moveServers(hostPorts, request.getTargetGroup()); - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postMoveServers(hostPorts, request.getTargetGroup()); + server.getRSGroupInfoManager().moveServers(hostPorts, request.getTargetGroup()); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().postMoveServers(hostPorts, request.getTargetGroup()); } } catch (IOException e) { throw new ServiceException(e); @@ -3100,14 +3035,14 @@ public class MasterRpcServices extends RSRpcServices implements public AddRSGroupResponse addRSGroup(RpcController controller, AddRSGroupRequest request) throws ServiceException { AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder(); - LOG.info(master.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName()); + LOG.info(server.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName()); try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preAddRSGroup(request.getRSGroupName()); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().preAddRSGroup(request.getRSGroupName()); } - master.getRSGroupInfoManager().addRSGroup(new RSGroupInfo(request.getRSGroupName())); - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postAddRSGroup(request.getRSGroupName()); + 
server.getRSGroupInfoManager().addRSGroup(new RSGroupInfo(request.getRSGroupName())); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().postAddRSGroup(request.getRSGroupName()); } } catch (IOException e) { throw new ServiceException(e); @@ -3119,14 +3054,14 @@ public class MasterRpcServices extends RSRpcServices implements public RemoveRSGroupResponse removeRSGroup(RpcController controller, RemoveRSGroupRequest request) throws ServiceException { RemoveRSGroupResponse.Builder builder = RemoveRSGroupResponse.newBuilder(); - LOG.info(master.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName()); + LOG.info(server.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName()); try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preRemoveRSGroup(request.getRSGroupName()); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().preRemoveRSGroup(request.getRSGroupName()); } - master.getRSGroupInfoManager().removeRSGroup(request.getRSGroupName()); - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postRemoveRSGroup(request.getRSGroupName()); + server.getRSGroupInfoManager().removeRSGroup(request.getRSGroupName()); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().postRemoveRSGroup(request.getRSGroupName()); } } catch (IOException e) { throw new ServiceException(e); @@ -3143,17 +3078,17 @@ public class MasterRpcServices extends RSRpcServices implements .setBalanceRan(false); LOG.info( - master.getClientIdAuditPrefix() + " balance rsgroup, group=" + request.getRSGroupName()); + server.getClientIdAuditPrefix() + " balance rsgroup, group=" + request.getRSGroupName()); try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost() + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost() .preBalanceRSGroup(request.getRSGroupName(), balanceRequest); } BalanceResponse response = - master.getRSGroupInfoManager().balanceRSGroup(request.getRSGroupName(), balanceRequest); + server.getRSGroupInfoManager().balanceRSGroup(request.getRSGroupName(), balanceRequest); ProtobufUtil.populateBalanceRSGroupResponse(builder, response); - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost() + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost() .postBalanceRSGroup(request.getRSGroupName(), balanceRequest, response); } } catch (IOException e) { @@ -3166,19 +3101,19 @@ public class MasterRpcServices extends RSRpcServices implements public ListRSGroupInfosResponse listRSGroupInfos(RpcController controller, ListRSGroupInfosRequest request) throws ServiceException { ListRSGroupInfosResponse.Builder builder = ListRSGroupInfosResponse.newBuilder(); - LOG.info(master.getClientIdAuditPrefix() + " list rsgroup"); + LOG.info(server.getClientIdAuditPrefix() + " list rsgroup"); try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preListRSGroups(); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().preListRSGroups(); } - List rsGroupInfos = master.getRSGroupInfoManager().listRSGroups().stream() + List rsGroupInfos = server.getRSGroupInfoManager().listRSGroups().stream() .map(RSGroupInfo::new).collect(Collectors.toList()); Map name2Info = new HashMap<>(); List needToFill = - new 
ArrayList<>(master.getTableDescriptors().getAll().values()); + new ArrayList<>(server.getTableDescriptors().getAll().values()); for (RSGroupInfo rsGroupInfo : rsGroupInfos) { name2Info.put(rsGroupInfo.getName(), rsGroupInfo); - for (TableDescriptor td : master.getTableDescriptors().getAll().values()) { + for (TableDescriptor td : server.getTableDescriptors().getAll().values()) { if (rsGroupInfo.containsTable(td.getTableName())){ needToFill.remove(td); } @@ -3195,8 +3130,8 @@ public class MasterRpcServices extends RSRpcServices implements // TODO: this can be done at once outside this loop, do not need to scan all every time. builder.addRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo)); } - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postListRSGroups(); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().postListRSGroups(); } } catch (IOException e) { throw new ServiceException(e); @@ -3212,15 +3147,15 @@ public class MasterRpcServices extends RSRpcServices implements for (HBaseProtos.ServerName el : request.getServersList()) { servers.add(Address.fromParts(el.getHostName(), el.getPort())); } - LOG.info(master.getClientIdAuditPrefix() + " remove decommissioned servers from rsgroup: " + + LOG.info(server.getClientIdAuditPrefix() + " remove decommissioned servers from rsgroup: " + servers); try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preRemoveServers(servers); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().preRemoveServers(servers); } - master.getRSGroupInfoManager().removeServers(servers); - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postRemoveServers(servers); + server.getRSGroupInfoManager().removeServers(servers); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().postRemoveServers(servers); } } catch (IOException e) { throw new ServiceException(e); @@ -3233,15 +3168,15 @@ public class MasterRpcServices extends RSRpcServices implements ListTablesInRSGroupRequest request) throws ServiceException { ListTablesInRSGroupResponse.Builder builder = ListTablesInRSGroupResponse.newBuilder(); String groupName = request.getGroupName(); - LOG.info(master.getClientIdAuditPrefix() + " list tables in rsgroup " + groupName); + LOG.info(server.getClientIdAuditPrefix() + " list tables in rsgroup " + groupName); try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preListTablesInRSGroup(groupName); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().preListTablesInRSGroup(groupName); } - RSGroupUtil.listTablesInRSGroup(master, groupName).stream() + RSGroupUtil.listTablesInRSGroup(server, groupName).stream() .map(ProtobufUtil::toProtoTableName).forEach(builder::addTableName); - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postListTablesInRSGroup(groupName); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().postListTablesInRSGroup(groupName); } } catch (IOException e) { throw new ServiceException(e); @@ -3256,24 +3191,24 @@ public class MasterRpcServices extends RSRpcServices implements GetConfiguredNamespacesAndTablesInRSGroupResponse.Builder builder = GetConfiguredNamespacesAndTablesInRSGroupResponse.newBuilder(); String groupName = request.getGroupName(); - LOG.info(master.getClientIdAuditPrefix() + " get 
configured namespaces and tables in rsgroup " + + LOG.info(server.getClientIdAuditPrefix() + " get configured namespaces and tables in rsgroup " + groupName); try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preGetConfiguredNamespacesAndTablesInRSGroup(groupName); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().preGetConfiguredNamespacesAndTablesInRSGroup(groupName); } - for (NamespaceDescriptor nd : master.getClusterSchema().getNamespaces()) { + for (NamespaceDescriptor nd : server.getClusterSchema().getNamespaces()) { if (groupName.equals(nd.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP))) { builder.addNamespace(nd.getName()); } } - for (TableDescriptor td : master.getTableDescriptors().getAll().values()) { + for (TableDescriptor td : server.getTableDescriptors().getAll().values()) { if (td.getRegionServerGroup().map(g -> g.equals(groupName)).orElse(false)) { builder.addTableName(ProtobufUtil.toProtoTableName(td.getTableName())); } } - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postGetConfiguredNamespacesAndTablesInRSGroup(groupName); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().postGetConfiguredNamespacesAndTablesInRSGroup(groupName); } } catch (IOException e) { throw new ServiceException(e); @@ -3288,14 +3223,14 @@ public class MasterRpcServices extends RSRpcServices implements String oldRSGroup = request.getOldRsgroupName(); String newRSGroup = request.getNewRsgroupName(); LOG.info("{} rename rsgroup from {} to {} ", - master.getClientIdAuditPrefix(), oldRSGroup, newRSGroup); + server.getClientIdAuditPrefix(), oldRSGroup, newRSGroup); try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preRenameRSGroup(oldRSGroup, newRSGroup); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().preRenameRSGroup(oldRSGroup, newRSGroup); } - master.getRSGroupInfoManager().renameRSGroup(oldRSGroup, newRSGroup); - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postRenameRSGroup(oldRSGroup, newRSGroup); + server.getRSGroupInfoManager().renameRSGroup(oldRSGroup, newRSGroup); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().postRenameRSGroup(oldRSGroup, newRSGroup); } } catch (IOException e) { throw new ServiceException(e); @@ -3311,15 +3246,15 @@ public class MasterRpcServices extends RSRpcServices implements String groupName = request.getGroupName(); Map configuration = new HashMap<>(); request.getConfigurationList().forEach(p -> configuration.put(p.getName(), p.getValue())); - LOG.info("{} update rsgroup {} configuration {}", master.getClientIdAuditPrefix(), groupName, + LOG.info("{} update rsgroup {} configuration {}", server.getClientIdAuditPrefix(), groupName, configuration); try { - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().preUpdateRSGroupConfig(groupName, configuration); + if (server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().preUpdateRSGroupConfig(groupName, configuration); } - master.getRSGroupInfoManager().updateRSGroupConfig(groupName, configuration); - if (master.getMasterCoprocessorHost() != null) { - master.getMasterCoprocessorHost().postUpdateRSGroupConfig(groupName, configuration); + server.getRSGroupInfoManager().updateRSGroupConfig(groupName, configuration); + if 
(server.getMasterCoprocessorHost() != null) { + server.getMasterCoprocessorHost().postUpdateRSGroupConfig(groupName, configuration); } } catch (IOException e) { throw new ServiceException(e); @@ -3364,9 +3299,9 @@ public class MasterRpcServices extends RSRpcServices implements throw new ServiceException("Invalid request params"); } - private MasterProtos.BalancerDecisionsResponse getBalancerDecisions( - MasterProtos.BalancerDecisionsRequest request) { - final NamedQueueRecorder namedQueueRecorder = this.regionServer.getNamedQueueRecorder(); + private MasterProtos.BalancerDecisionsResponse + getBalancerDecisions(MasterProtos.BalancerDecisionsRequest request) { + final NamedQueueRecorder namedQueueRecorder = this.server.getNamedQueueRecorder(); if (namedQueueRecorder == null) { return MasterProtos.BalancerDecisionsResponse.newBuilder() .addAllBalancerDecision(Collections.emptyList()).build(); @@ -3385,7 +3320,7 @@ public class MasterRpcServices extends RSRpcServices implements private MasterProtos.BalancerRejectionsResponse getBalancerRejections( MasterProtos.BalancerRejectionsRequest request) { - final NamedQueueRecorder namedQueueRecorder = this.regionServer.getNamedQueueRecorder(); + final NamedQueueRecorder namedQueueRecorder = this.server.getNamedQueueRecorder(); if (namedQueueRecorder == null) { return MasterProtos.BalancerRejectionsResponse.newBuilder() .addAllBalancerRejection(Collections.emptyList()).build(); @@ -3402,4 +3337,149 @@ public class MasterRpcServices extends RSRpcServices implements .addAllBalancerRejection(balancerRejections).build(); } + @Override + @QosPriority(priority=HConstants.ADMIN_QOS) + public GetRegionInfoResponse getRegionInfo(final RpcController controller, + final GetRegionInfoRequest request) throws ServiceException { + RegionInfo ri = null; + try { + ri = getRegionInfo(request.getRegion()); + } catch(UnknownRegionException ure) { + throw new ServiceException(ure); + } + GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder(); + if (ri != null) { + builder.setRegionInfo(ProtobufUtil.toRegionInfo(ri)); + } else { + // Is it a MOB name? These work differently. + byte [] regionName = request.getRegion().getValue().toByteArray(); + TableName tableName = RegionInfo.getTable(regionName); + if (MobUtils.isMobRegionName(tableName, regionName)) { + // a dummy region info contains the compaction state. + RegionInfo mobRegionInfo = MobUtils.getMobRegionInfo(tableName); + builder.setRegionInfo(ProtobufUtil.toRegionInfo(mobRegionInfo)); + if (request.hasCompactionState() && request.getCompactionState()) { + builder.setCompactionState(server.getMobCompactionState(tableName)); + } + } else { + // If unknown RegionInfo and not a MOB region, it is unknown. 
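The MOB branch in the new master-side getRegionInfo above lets the master answer Admin queries about a table's synthetic MOB "region" by returning a dummy RegionInfo plus the MOB compaction state; anything else falls through to the UnknownRegionException on the next line. As a hedged usage sketch only (not part of the patch), a client usually reaches this path through the Admin API roughly as follows; the table name and configuration are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactType;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class MobCompactionStateCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
            Admin admin = conn.getAdmin()) {
          // For CompactType.MOB the request is served by the master, which is where the
          // getRegionInfo override above comes into play.
          CompactionState state =
            admin.getCompactionState(TableName.valueOf("example_table"), CompactType.MOB);
          System.out.println("MOB compaction state: " + state);
        }
      }
    }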
+ throw new ServiceException(new UnknownRegionException(Bytes.toString(regionName))); + } + } + return builder.build(); + } + + @Override + public GetStoreFileResponse getStoreFile(RpcController controller, GetStoreFileRequest request) + throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public GetOnlineRegionResponse getOnlineRegion(RpcController controller, + GetOnlineRegionRequest request) throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public OpenRegionResponse openRegion(RpcController controller, OpenRegionRequest request) + throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public WarmupRegionResponse warmupRegion(RpcController controller, WarmupRegionRequest request) + throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public CloseRegionResponse closeRegion(RpcController controller, CloseRegionRequest request) + throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public FlushRegionResponse flushRegion(RpcController controller, FlushRegionRequest request) + throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public CompactionSwitchResponse compactionSwitch(RpcController controller, + CompactionSwitchRequest request) throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public CompactRegionResponse compactRegion(RpcController controller, CompactRegionRequest request) + throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public ReplicateWALEntryResponse replicateWALEntry(RpcController controller, + ReplicateWALEntryRequest request) throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public ReplicateWALEntryResponse replay(RpcController controller, + ReplicateWALEntryRequest request) throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public RollWALWriterResponse rollWALWriter(RpcController controller, RollWALWriterRequest request) + throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public GetServerInfoResponse getServerInfo(RpcController controller, GetServerInfoRequest request) + throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public StopServerResponse stopServer(RpcController controller, StopServerRequest request) + throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public UpdateFavoredNodesResponse updateFavoredNodes(RpcController controller, + UpdateFavoredNodesRequest request) throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public GetRegionLoadResponse getRegionLoad(RpcController controller, GetRegionLoadRequest request) + 
throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public ClearCompactionQueuesResponse clearCompactionQueues(RpcController controller, + ClearCompactionQueuesRequest request) throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller, + ClearRegionBlockCacheRequest request) throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public GetSpaceQuotaSnapshotsResponse getSpaceQuotaSnapshots(RpcController controller, + GetSpaceQuotaSnapshotsRequest request) throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } + + @Override + public ExecuteProceduresResponse executeProcedures(RpcController controller, + ExecuteProceduresRequest request) throws ServiceException { + throw new ServiceException(new DoNotRetryIOException("Unsupported method on master")); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterDumpServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterDumpServlet.java index 6db4579b6a7..9c73ab21747 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterDumpServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/http/MasterDumpServlet.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.assignment.RegionStateNode; import org.apache.hadoop.hbase.monitoring.StateDumpServlet; import org.apache.hadoop.hbase.monitoring.TaskMonitor; -import org.apache.hadoop.hbase.regionserver.http.RSDumpServlet; import org.apache.hadoop.hbase.util.LogMonitoring; import org.apache.hadoop.hbase.util.Threads; import org.apache.yetus.audience.InterfaceAudience; @@ -102,11 +101,6 @@ public class MasterDumpServlet extends StateDumpServlet { long tailKb = getTailKbParam(request); LogMonitoring.dumpTailOfLogs(out, tailKb); - out.println("\n\nRS Queue:"); - out.println(LINE); - if (isShowQueueDump(conf)) { - RSDumpServlet.dumpQueue(master, out); - } out.flush(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index a7a3b7dd6db..efd9fa70f92 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -23,14 +23,10 @@ import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_MAX_SPLITTER; import static org.apache.hadoop.hbase.util.DNS.UNSAFE_RS_HOSTNAME_KEY; -import com.google.errorprone.annotations.RestrictedApi; import java.io.IOException; import java.io.PrintWriter; -import java.lang.management.MemoryType; import java.lang.management.MemoryUsage; import java.lang.reflect.Constructor; -import java.net.BindException; -import java.net.InetAddress; import java.net.InetSocketAddress; import java.time.Duration; import java.util.ArrayList; @@ -60,72 +56,54 @@ import javax.management.MalformedObjectNameException; import javax.servlet.http.HttpServlet; import org.apache.commons.lang3.RandomUtils; import 
org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.SystemUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.CacheEvictionStats; import org.apache.hadoop.hbase.CallQueueTooBigException; -import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.ClockOutOfSyncException; -import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ExecutorStatusChore; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; +import org.apache.hadoop.hbase.HBaseServerBase; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HealthCheckChore; -import org.apache.hadoop.hbase.MetaRegionLocationCache; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.PleaseHoldException; import org.apache.hadoop.hbase.ScheduledChore; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Stoppable; -import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.YouAreDeadException; import org.apache.hadoop.hbase.ZNodeClearer; -import org.apache.hadoop.hbase.client.AsyncClusterConnection; -import org.apache.hadoop.hbase.client.ClusterConnectionFactory; -import org.apache.hadoop.hbase.client.Connection; -import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.ConnectionRegistryEndpoint; import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.locking.EntityLock; import org.apache.hadoop.hbase.client.locking.LockServiceClient; import org.apache.hadoop.hbase.conf.ConfigurationManager; -import org.apache.hadoop.hbase.conf.ConfigurationObserver; -import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.exceptions.RegionMovedException; import org.apache.hadoop.hbase.exceptions.RegionOpeningException; import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; -import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.executor.ExecutorType; -import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.http.InfoServer; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; -import org.apache.hadoop.hbase.ipc.NettyRpcClientConfigHelper; import org.apache.hadoop.hbase.ipc.RpcClient; import org.apache.hadoop.hbase.ipc.RpcServer; -import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.ipc.ServerRpcController; import org.apache.hadoop.hbase.log.HBaseMarkers; -import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.master.MasterRpcServicesVersionWrapper; import 
org.apache.hadoop.hbase.master.RegionState; -import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer; import org.apache.hadoop.hbase.mob.MobFileCache; import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; import org.apache.hadoop.hbase.namequeues.SlowLogTableOpsChore; @@ -157,44 +135,33 @@ import org.apache.hadoop.hbase.security.SecurityConstants; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; -import org.apache.hadoop.hbase.security.access.AccessChecker; -import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher; -import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.CompressionTest; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.FutureUtils; import org.apache.hadoop.hbase.util.JvmPauseMonitor; -import org.apache.hadoop.hbase.util.NettyEventLoopGroupConfig; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.RetryCounter; import org.apache.hadoop.hbase.util.RetryCounterFactory; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; -import org.apache.hadoop.hbase.util.Sleeper; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.VersionInfo; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; -import org.apache.hadoop.hbase.wal.NettyAsyncFSWALConfigHelper; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALFactory; -import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.RegionServerAddressTracker; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; import org.apache.hadoop.hbase.zookeeper.ZKNodeTracker; import org.apache.hadoop.hbase.zookeeper.ZKUtil; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import sun.misc.Signal; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.base.Throwables; @@ -249,8 +216,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @SuppressWarnings({ "deprecation"}) -public class HRegionServer extends Thread implements RegionServerServices, LastSequenceId, - ConnectionRegistryEndpoint, ConfigurationObserver { +public class HRegionServer extends HBaseServerBase + implements RegionServerServices, LastSequenceId { private static final Logger LOG = LoggerFactory.getLogger(HRegionServer.class); @@ -292,16 +259,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS private HeapMemoryManager hMemManager; - /** - * The asynchronous cluster connection to be shared by services. - */ - protected AsyncClusterConnection asyncClusterConnection; - - /** - * Go here to get table descriptors. - */ - protected TableDescriptors tableDescriptors; - // Replication services. 
If no replication, this handler will be null. private ReplicationSourceService replicationSourceHandler; private ReplicationSinkService replicationSinkHandler; @@ -336,21 +293,8 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS private LeaseManager leaseManager; - // Instance of the hbase executor executorService. - protected ExecutorService executorService; - private volatile boolean dataFsOk; - private HFileSystem dataFs; - private HFileSystem walFs; - // Set when a report to the master comes back with a message asking us to - // shutdown. Also set by call to stop when debugging or running unit tests - // of HRegionServer in isolation. - private volatile boolean stopped = false; - - // Go down hard. Used if file system becomes unavailable and also in - // debugging and unit tests. - private AtomicBoolean abortRequested; static final String ABORT_TIMEOUT = "hbase.regionserver.abort.timeout"; // Default abort timeout is 1200 seconds for safe private static final long DEFAULT_ABORT_TIMEOUT = 1200000; @@ -361,15 +305,8 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS // space regions. private boolean stopping = false; private volatile boolean killed = false; - private volatile boolean shutDown = false; - - protected final Configuration conf; - - private Path dataRootDir; - private Path walRootDir; private final int threadWakeFrequency; - final int msgInterval; private static final String PERIOD_COMPACTION = "hbase.regionserver.compaction.check.period"; private final int compactionCheckFrequency; @@ -384,10 +321,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS private UncaughtExceptionHandler uncaughtExceptionHandler; - // Info server. Default access so can be used by unit tests. REGIONSERVER - // is name of the webapp and the attribute name used stuffing this instance - // into web context. - protected InfoServer infoServer; private JvmPauseMonitor pauseMonitor; /** region server process name */ @@ -397,11 +330,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS private MetricsRegionServer metricsRegionServer; MetricsRegionServerWrapperImpl metricsRegionServerImpl; - /** - * ChoreService used to schedule tasks that we want to run periodically - */ - private ChoreService choreService; - /** * Check for compactions requests. */ @@ -422,31 +350,17 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS // flag set after we're done setting up server threads final AtomicBoolean online = new AtomicBoolean(false); - // zookeeper connection and watcher - protected final ZKWatcher zooKeeper; - // master address tracker private final MasterAddressTracker masterAddressTracker; - /** - * Cache for the meta region replica's locations. Also tracks their changes to avoid stale cache - * entries. Used for serving ClientMetaService. - */ - private final MetaRegionLocationCache metaRegionLocationCache; /** * Cache for all the region servers in the cluster. Used for serving ClientMetaService. */ private final RegionServerAddressTracker regionServerAddressTracker; - // Cluster Status Tracker - protected final ClusterStatusTracker clusterStatusTracker; - // Log Splitting Worker private SplitLogWorker splitLogWorker; - // A sleeper that sleeps for msgInterval. 
- protected final Sleeper sleeper; - private final int shortOperationTimeout; // Time to pause if master says 'please hold' @@ -472,18 +386,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS private Map coprocessorServiceHandlers = Maps.newHashMap(); - /** - * The server name the Master sees us as. Its made from the hostname the - * master passes us, port, and server startcode. Gets set after registration - * against Master. - */ - protected ServerName serverName; - - /** - * hostname specified by hostname config - */ - protected String useThisHostnameInstead; - /** * @deprecated since 2.4.0 and will be removed in 4.0.0. * Use {@link HRegionServer#UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY} instead. @@ -502,15 +404,10 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS final static String UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY = "hbase.unsafe.regionserver.hostname.disable.master.reversedns"; - /** - * This servers startcode. - */ - protected final long startcode; - /** * Unique identifier for the cluster we are a part of. */ - protected String clusterId; + private String clusterId; // chore for refreshing store files for secondary regions private StorefileRefresherChore storefileRefresher; @@ -542,18 +439,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS */ final ServerNonceManager nonceManager; - private UserProvider userProvider; - - protected final RSRpcServices rpcServices; - - private CoordinatedStateManager csm; - - /** - * Configuration manager is used to register/deregister and notify the configuration observers - * when the regionserver is notified that there was a change in the on disk configs. - */ - protected final ConfigurationManager configurationManager; - @InterfaceAudience.Private CompactedHFilesDischarger compactedFileDischarger; @@ -563,13 +448,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS private FileSystemUtilizationChore fsUtilizationChore; - private final NettyEventLoopGroupConfig eventLoopGroupConfig; - - /** - * Provide online slow log responses from ringbuffer - */ - private NamedQueueRecorder namedQueueRecorder = null; - /** * True if this RegionServer is coming up in a cluster where there is no Master; * means it needs to just come up and make do without a Master to talk to: e.g. in test or @@ -593,17 +471,13 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS * Defer till after we register with the Master as much as possible. See {@link #startServices}. 
*/ public HRegionServer(final Configuration conf) throws IOException { - super("RegionServer"); // thread name + super(conf, "RegionServer"); // thread name try { - this.startcode = EnvironmentEdgeManager.currentTime(); - this.conf = conf; this.dataFsOk = true; - this.masterless = conf.getBoolean(MASTERLESS_CONFIG_NAME, false); - this.eventLoopGroupConfig = setupNetty(this.conf); + this.masterless = !clusterMode(); MemorySizeUtil.checkForClusterFreeHeapMemoryLimit(this.conf); HFile.checkHFileVersion(this.conf); checkCodecs(this.conf); - this.userProvider = UserProvider.instantiate(conf); FSUtils.setupShortCircuitRead(this.conf); // Disable usage of meta replicas in the regionserver @@ -612,9 +486,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS this.threadWakeFrequency = conf.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000); this.compactionCheckFrequency = conf.getInt(PERIOD_COMPACTION, this.threadWakeFrequency); this.flushCheckFrequency = conf.getInt(PERIOD_FLUSH, this.threadWakeFrequency); - this.msgInterval = conf.getInt("hbase.regionserver.msginterval", 3 * 1000); - - this.sleeper = new Sleeper(this.msgInterval, this); boolean isNoncesEnabled = conf.getBoolean(HConstants.HBASE_RS_NONCES_ENABLED, true); this.nonceManager = isNoncesEnabled ? new ServerNonceManager(this.conf) : null; @@ -625,80 +496,23 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS this.retryPauseTime = conf.getLong(HConstants.HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME, HConstants.DEFAULT_HBASE_RPC_SHORTOPERATION_RETRY_PAUSE_TIME); - this.abortRequested = new AtomicBoolean(false); - this.stopped = false; - - initNamedQueueRecorder(conf); - rpcServices = createRpcServices(); - useThisHostnameInstead = getUseThisHostnameInstead(conf); - String hostName = - StringUtils.isBlank(useThisHostnameInstead) ? this.rpcServices.isa.getHostName() - : this.useThisHostnameInstead; - serverName = ServerName.valueOf(hostName, this.rpcServices.isa.getPort(), this.startcode); - - // login the zookeeper client principal (if using security) - ZKUtil.loginClient(this.conf, HConstants.ZK_CLIENT_KEYTAB_FILE, - HConstants.ZK_CLIENT_KERBEROS_PRINCIPAL, hostName); - // login the server principal (if using secure Hadoop) - login(userProvider, hostName); - // init superusers and add the server principal (if using security) - // or process owner as default super user. - Superusers.initialize(conf); regionServerAccounting = new RegionServerAccounting(conf); - boolean isMasterNotCarryTable = - this instanceof HMaster && !((HMaster) this).isInMaintenanceMode(); - - // no need to instantiate block cache and mob file cache when master not carry table - if (!isMasterNotCarryTable) { - blockCache = BlockCacheFactory.createBlockCache(conf); - mobFileCache = new MobFileCache(conf); - } + blockCache = BlockCacheFactory.createBlockCache(conf); + mobFileCache = new MobFileCache(conf); uncaughtExceptionHandler = (t, e) -> abort("Uncaught exception in executorService thread " + t.getName(), e); - initializeFileSystem(); - - this.configurationManager = new ConfigurationManager(); - setupWindows(getConfiguration(), getConfigurationManager()); - - // Some unit tests don't need a cluster, so no zookeeper at all - // Open connection to zookeeper and set primary watcher - zooKeeper = new ZKWatcher(conf, getProcessName() + ":" + rpcServices.isa.getPort(), this, - canCreateBaseZNode()); // If no master in cluster, skip trying to track one or look for a cluster status. 
if (!this.masterless) { - if (conf.getBoolean(HBASE_SPLIT_WAL_COORDINATED_BY_ZK, - DEFAULT_HBASE_SPLIT_COORDINATED_BY_ZK)) { - this.csm = new ZkCoordinatedStateManager(this); - } - masterAddressTracker = new MasterAddressTracker(getZooKeeper(), this); masterAddressTracker.start(); - - clusterStatusTracker = new ClusterStatusTracker(zooKeeper, this); - clusterStatusTracker.start(); } else { masterAddressTracker = null; - clusterStatusTracker = null; } this.rpcServices.start(zooKeeper); - this.metaRegionLocationCache = new MetaRegionLocationCache(zooKeeper); - if (!(this instanceof HMaster)) { - // do not create this field for HMaster, we have another region server tracker for HMaster. - this.regionServerAddressTracker = new RegionServerAddressTracker(zooKeeper, this); - } else { - this.regionServerAddressTracker = null; - } - // This violates 'no starting stuff in Constructor' but Master depends on the below chore - // and executor being created and takes a different startup route. Lots of overlap between HRS - // and M (An M IS A HRS now). Need to refactor so less duplication between M and its super - // Master expects Constructor to put up web servers. Ugh. - // class HRS. TODO. - this.choreService = new ChoreService(getName(), true); - this.executorService = new ExecutorService(getName()); - putUpWebUI(); + this.regionServerAddressTracker = new RegionServerAddressTracker(zooKeeper, this); } catch (Throwable t) { // Make sure we log the exception. HRegionServer is often started via reflection and the // cause of failed startup is lost. @@ -707,28 +521,8 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS } } - private void initNamedQueueRecorder(Configuration conf) { - if (!(this instanceof HMaster)) { - final boolean isOnlineLogProviderEnabled = conf.getBoolean( - HConstants.SLOW_LOG_BUFFER_ENABLED_KEY, - HConstants.DEFAULT_ONLINE_LOG_PROVIDER_ENABLED); - if (isOnlineLogProviderEnabled) { - this.namedQueueRecorder = NamedQueueRecorder.getInstance(this.conf); - } - } else { - final boolean isBalancerDecisionRecording = conf - .getBoolean(BaseLoadBalancer.BALANCER_DECISION_BUFFER_ENABLED, - BaseLoadBalancer.DEFAULT_BALANCER_DECISION_BUFFER_ENABLED); - final boolean isBalancerRejectionRecording = conf - .getBoolean(BaseLoadBalancer.BALANCER_REJECTION_BUFFER_ENABLED, - BaseLoadBalancer.DEFAULT_BALANCER_REJECTION_BUFFER_ENABLED); - if (isBalancerDecisionRecording || isBalancerRejectionRecording) { - this.namedQueueRecorder = NamedQueueRecorder.getInstance(this.conf); - } - } - } - // HMaster should override this method to load the specific config for master + @Override protected String getUseThisHostnameInstead(Configuration conf) throws IOException { String hostname = conf.get(UNSAFE_RS_HOSTNAME_KEY); if (conf.getBoolean(UNSAFE_RS_HOSTNAME_DISABLE_MASTER_REVERSEDNS_KEY, false)) { @@ -738,85 +532,35 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS " to true while " + UNSAFE_RS_HOSTNAME_KEY + " is used"; throw new IOException(msg); } else { - return rpcServices.isa.getHostName(); + return rpcServices.getSocketAddress().getHostName(); } } else { return hostname; } } - /** - * If running on Windows, do windows-specific setup. 
- */ - private static void setupWindows(final Configuration conf, ConfigurationManager cm) { - if (!SystemUtils.IS_OS_WINDOWS) { - Signal.handle(new Signal("HUP"), signal -> { - conf.reloadConfiguration(); - cm.notifyAllObservers(conf); - }); - } - } - - private static NettyEventLoopGroupConfig setupNetty(Configuration conf) { - // Initialize netty event loop group at start as we may use it for rpc server, rpc client & WAL. - NettyEventLoopGroupConfig nelgc = - new NettyEventLoopGroupConfig(conf, "RS-EventLoopGroup"); - NettyRpcClientConfigHelper.setEventLoopConfig(conf, nelgc.group(), nelgc.clientChannelClass()); - NettyAsyncFSWALConfigHelper.setEventLoopConfig(conf, nelgc.group(), nelgc.clientChannelClass()); - return nelgc; - } - - private void initializeFileSystem() throws IOException { - // Get fs instance used by this RS. Do we use checksum verification in the hbase? If hbase - // checksum verification enabled, then automatically switch off hdfs checksum verification. - boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true); - String walDirUri = CommonFSUtils.getDirUri(this.conf, - new Path(conf.get(CommonFSUtils.HBASE_WAL_DIR, conf.get(HConstants.HBASE_DIR)))); - // set WAL's uri - if (walDirUri != null) { - CommonFSUtils.setFsDefault(this.conf, walDirUri); - } - // init the WALFs - this.walFs = new HFileSystem(this.conf, useHBaseChecksum); - this.walRootDir = CommonFSUtils.getWALRootDir(this.conf); - // Set 'fs.defaultFS' to match the filesystem on hbase.rootdir else - // underlying hadoop hdfs accessors will be going against wrong filesystem - // (unless all is set to defaults). - String rootDirUri = - CommonFSUtils.getDirUri(this.conf, new Path(conf.get(HConstants.HBASE_DIR))); - if (rootDirUri != null) { - CommonFSUtils.setFsDefault(this.conf, rootDirUri); - } - // init the filesystem - this.dataFs = new HFileSystem(this.conf, useHBaseChecksum); - this.dataRootDir = CommonFSUtils.getRootDir(this.conf); - this.tableDescriptors = new FSTableDescriptors(this.dataFs, this.dataRootDir, - !canUpdateTableDescriptor(), cacheTableDescriptor()); - } - + @Override protected void login(UserProvider user, String host) throws IOException { user.login(SecurityConstants.REGIONSERVER_KRB_KEYTAB_FILE, SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, host); } - /** - * Wait for an active Master. - * See override in Master superclass for how it is used. - */ - protected void waitForMasterActive() {} - + @Override protected String getProcessName() { return REGIONSERVER; } + @Override protected boolean canCreateBaseZNode() { - return this.masterless; + return !clusterMode(); } + @Override protected boolean canUpdateTableDescriptor() { return false; } + @Override protected boolean cacheTableDescriptor() { return false; } @@ -825,11 +569,13 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS return new RSRpcServices(this); } - protected void configureInfoServer() { + @Override + protected void configureInfoServer(InfoServer infoServer) { infoServer.addUnprivilegedServlet("rs-status", "/rs-status", RSStatusServlet.class); infoServer.setAttribute(REGIONSERVER, this); } + @Override protected Class getDumpServlet() { return RSDumpServlet.class; } @@ -890,19 +636,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS return this.clusterId; } - /** - * Setup our cluster connection if not already initialized. 
- */ - protected final synchronized void setupClusterConnection() throws IOException { - if (asyncClusterConnection == null) { - InetSocketAddress localAddress = - new InetSocketAddress(rpcServices.getSocketAddress().getAddress(), 0); - User user = userProvider.getCurrent(); - asyncClusterConnection = - ClusterConnectionFactory.createAsyncClusterConnection(this, conf, localAddress, user); - } - } - /** * All initialization needed before we go register with Master.
* Do bare minimum. Do bulk of initializations AFTER we've connected to the Master.
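The setupClusterConnection() method removed just above lazily created the shared AsyncClusterConnection under synchronization, so repeated callers got the same instance; presumably that responsibility now lives in the new base class, which this hunk does not show. Here is a self-contained sketch of the lazy, idempotent initialization idiom it used, with a hypothetical ClusterConnection stand-in instead of the real HBase connection types.

import java.io.IOException;

public class LazyConnectionHolder {

  /** Hypothetical stand-in for the shared cluster connection. */
  interface ClusterConnection extends AutoCloseable {
    @Override
    void close() throws IOException;
  }

  private ClusterConnection connection;

  /** First caller creates the connection; synchronized + null check keeps it idempotent. */
  synchronized ClusterConnection getOrCreateConnection() throws IOException {
    if (connection == null) {
      connection = () -> { /* nothing to release in this sketch */ };
    }
    return connection;
  }

  public static void main(String[] args) throws IOException {
    LazyConnectionHolder holder = new LazyConnectionHolder();
    // Both calls observe the same instance.
    System.out.println(holder.getOrCreateConnection() == holder.getOrCreateConnection()); // true
  }
}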
@@ -961,7 +694,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS } } - waitForMasterActive(); if (isStopped() || isAborted()) { return; // No need for further initialization } @@ -1071,7 +803,7 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS } else if (!this.stopping) { this.stopping = true; LOG.info("Closing user regions"); - closeUserRegions(this.abortRequested.get()); + closeUserRegions(isAborted()); } else { boolean allUserRegionsOffline = areAllUserRegionsOffline(); if (allUserRegionsOffline) { @@ -1114,14 +846,7 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS if (this.splitLogWorker != null) { splitLogWorker.stop(); } - if (this.infoServer != null) { - LOG.info("Stopping infoServer"); - try { - this.infoServer.stop(); - } catch (Exception e) { - LOG.error("Failed to stop infoServer", e); - } - } + stopInfoServer(); // Send cache a shutdown. if (blockCache != null) { blockCache.shutdown(); @@ -1153,15 +878,7 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS LOG.info("stopping server " + this.serverName); } - if (this.asyncClusterConnection != null) { - try { - this.asyncClusterConnection.close(); - } catch (IOException e) { - // Although the {@link Closeable} interface throws an {@link - // IOException}, in reality, the implementation would never do that. - LOG.warn("Attempt to close server's AsyncClusterConnection failed.", e); - } - } + closeClusterConnection(); // Closing the compactSplit thread before closing meta regions if (!this.killed && containsMetaTableRegions()) { if (!abortRequested.get() || this.dataFsOk) { @@ -1228,10 +945,7 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS // we delete the file anyway: a second attempt to delete the znode is likely to fail again. ZNodeClearer.deleteMyEphemeralNodeOnDisk(); - if (this.zooKeeper != null) { - this.zooKeeper.close(); - } - this.shutDown = true; + closeZooKeeper(); LOG.info("Exiting; stopping=" + this.serverName + "; zookeeper connection closed."); } @@ -1570,15 +1284,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS } /** - * get NamedQueue Provider to add different logs to ringbuffer - * - * @return NamedQueueRecorder - */ - public NamedQueueRecorder getNamedQueueRecorder() { - return this.namedQueueRecorder; - } - - /* * Run init. Sets up wal and starts up all server threads. * * @param c Extra configuration. @@ -1592,19 +1297,19 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS // The hostname the master sees us as. 
if (key.equals(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)) { String hostnameFromMasterPOV = e.getValue(); - this.serverName = ServerName.valueOf(hostnameFromMasterPOV, rpcServices.isa.getPort(), - this.startcode); + this.serverName = ServerName.valueOf(hostnameFromMasterPOV, + rpcServices.getSocketAddress().getPort(), this.startcode); if (!StringUtils.isBlank(useThisHostnameInstead) && - !hostnameFromMasterPOV.equals(useThisHostnameInstead)) { + !hostnameFromMasterPOV.equals(useThisHostnameInstead)) { String msg = "Master passed us a different hostname to use; was=" + - this.useThisHostnameInstead + ", but now=" + hostnameFromMasterPOV; + this.useThisHostnameInstead + ", but now=" + hostnameFromMasterPOV; LOG.error(msg); throw new IOException(msg); } if (StringUtils.isBlank(useThisHostnameInstead) && - !hostnameFromMasterPOV.equals(rpcServices.isa.getHostName())) { + !hostnameFromMasterPOV.equals(rpcServices.getSocketAddress().getHostName())) { String msg = "Master passed us a different hostname to use; was=" + - rpcServices.isa.getHostName() + ", but now=" + hostnameFromMasterPOV; + rpcServices.getSocketAddress().getHostName() + ", but now=" + hostnameFromMasterPOV; LOG.error(msg); } continue; @@ -1659,11 +1364,10 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS // or make sense of it. startReplicationService(); - // Set up ZK - LOG.info("Serving as " + this.serverName + ", RpcServer on " + rpcServices.isa + - ", sessionid=0x" + - Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId())); + LOG.info("Serving as " + this.serverName + ", RpcServer on " + + rpcServices.getSocketAddress() + ", sessionid=0x" + + Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId())); // Wake up anyone waiting for this server to online synchronized (online) { @@ -1679,28 +1383,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS } } - protected void initializeMemStoreChunkCreator() { - if (MemStoreLAB.isEnabled(conf)) { - // MSLAB is enabled. So initialize MemStoreChunkPool - // By this time, the MemstoreFlusher is already initialized. We can get the global limits from - // it. - Pair pair = MemorySizeUtil.getGlobalMemStoreSize(conf); - long globalMemStoreSize = pair.getFirst(); - boolean offheap = this.regionServerAccounting.isOffheap(); - // When off heap memstore in use, take full area for chunk pool. - float poolSizePercentage = offheap ? 1.0F : - conf.getFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, MemStoreLAB.POOL_MAX_SIZE_DEFAULT); - float initialCountPercentage = conf.getFloat(MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY, - MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT); - int chunkSize = conf.getInt(MemStoreLAB.CHUNK_SIZE_KEY, MemStoreLAB.CHUNK_SIZE_DEFAULT); - float indexChunkSizePercent = conf.getFloat(MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_KEY, - MemStoreLAB.INDEX_CHUNK_SIZE_PERCENTAGE_DEFAULT); - // init the chunkCreator - ChunkCreator.initialize(chunkSize, offheap, globalMemStoreSize, poolSizePercentage, - initialCountPercentage, this.hMemManager, indexChunkSizePercent); - } - } - private void startHeapMemoryManager() { if (this.blockCache != null) { this.hMemManager = @@ -1953,28 +1635,24 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS * be hooked up to WAL. 
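In handleReportForDutyResponse above, the server keeps its local RPC port and startcode but adopts the hostname the master resolved for it, rebuilding serverName via ServerName.valueOf. A small illustration of that composition follows; it assumes hbase-common is on the classpath, and the hostname and port literals are made-up values.

import org.apache.hadoop.hbase.ServerName;

public class ServerNameFromMasterPOV {
  public static void main(String[] args) {
    long startcode = System.currentTimeMillis();      // plays the role of this.startcode
    String hostnameFromMasterPOV = "rs1.example.org"; // hostname as the master sees us
    int rpcPort = 16020;                              // port of the already-bound RpcServer

    // Only the hostname comes from the master; port and startcode stay local.
    ServerName sn = ServerName.valueOf(hostnameFromMasterPOV, rpcPort, startcode);
    System.out.println("Registering as " + sn);
  }
}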
*/ private void setupWALAndReplication() throws IOException { - boolean isMaster = this instanceof HMaster; - WALFactory factory = - new WALFactory(conf, serverName.toString(), this, !isMaster); - if (!isMaster) { - // TODO Replication make assumptions here based on the default filesystem impl - Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); - String logName = AbstractFSWALProvider.getWALDirectoryName(this.serverName.toString()); + WALFactory factory = new WALFactory(conf, serverName.toString(), this, true); + // TODO Replication make assumptions here based on the default filesystem impl + Path oldLogDir = new Path(walRootDir, HConstants.HREGION_OLDLOGDIR_NAME); + String logName = AbstractFSWALProvider.getWALDirectoryName(this.serverName.toString()); - Path logDir = new Path(walRootDir, logName); - LOG.debug("logDir={}", logDir); - if (this.walFs.exists(logDir)) { - throw new RegionServerRunningException( - "Region server has already created directory at " + this.serverName.toString()); - } - // Always create wal directory as now we need this when master restarts to find out the live - // region servers. - if (!this.walFs.mkdirs(logDir)) { - throw new IOException("Can not create wal directory " + logDir); - } - // Instantiate replication if replication enabled. Pass it the log directories. - createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir, factory); + Path logDir = new Path(walRootDir, logName); + LOG.debug("logDir={}", logDir); + if (this.walFs.exists(logDir)) { + throw new RegionServerRunningException( + "Region server has already created directory at " + this.serverName.toString()); } + // Always create wal directory as now we need this when master restarts to find out the live + // region servers. + if (!this.walFs.mkdirs(logDir)) { + throw new IOException("Can not create wal directory " + logDir); + } + // Instantiate replication if replication enabled. Pass it the log directories. + createNewReplicationInstance(conf, this, this.walFs, logDir, oldLogDir, factory); this.walFactory = factory; } @@ -2170,7 +1848,7 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS // Memstore services. startHeapMemoryManager(); // Call it after starting HeapMemoryManager. - initializeMemStoreChunkCreator(); + initializeMemStoreChunkCreator(hMemManager); } private void initializeThreads() { @@ -2232,61 +1910,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS configurationManager.registerObserver(this); } - /** - * Puts up the webui. - */ - private void putUpWebUI() throws IOException { - int port = this.conf.getInt(HConstants.REGIONSERVER_INFO_PORT, - HConstants.DEFAULT_REGIONSERVER_INFOPORT); - String addr = this.conf.get("hbase.regionserver.info.bindAddress", "0.0.0.0"); - - if(this instanceof HMaster) { - port = conf.getInt(HConstants.MASTER_INFO_PORT, - HConstants.DEFAULT_MASTER_INFOPORT); - addr = this.conf.get("hbase.master.info.bindAddress", "0.0.0.0"); - } - // -1 is for disabling info server - if (port < 0) { - return; - } - - if (!Addressing.isLocalAddress(InetAddress.getByName(addr))) { - String msg = - "Failed to start http info server. Address " + addr - + " does not belong to this host. 
Correct configuration parameter: " - + "hbase.regionserver.info.bindAddress"; - LOG.error(msg); - throw new IOException(msg); - } - // check if auto port bind enabled - boolean auto = this.conf.getBoolean(HConstants.REGIONSERVER_INFO_PORT_AUTO, false); - while (true) { - try { - this.infoServer = new InfoServer(getProcessName(), addr, port, false, this.conf); - infoServer.addPrivilegedServlet("dump", "/dump", getDumpServlet()); - configureInfoServer(); - this.infoServer.start(); - break; - } catch (BindException e) { - if (!auto) { - // auto bind disabled throw BindException - LOG.error("Failed binding http info server to port: " + port); - throw e; - } - // auto bind enabled, try to use another port - LOG.info("Failed binding http info server to port: " + port); - port++; - LOG.info("Retry starting http info server with port: " + port); - } - } - port = this.infoServer.getPort(); - conf.setInt(HConstants.REGIONSERVER_INFO_PORT, port); - int masterInfoPort = conf.getInt(HConstants.MASTER_INFO_PORT, - HConstants.DEFAULT_MASTER_INFOPORT); - conf.setInt("hbase.master.info.port.orig", masterInfoPort); - conf.setInt(HConstants.MASTER_INFO_PORT, port); - } - /* * Verify that server is healthy */ @@ -2329,11 +1952,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS return walFactory; } - @Override - public Connection getConnection() { - return getAsyncConnection().toConnection(); - } - @Override public void stop(final String msg) { stop(msg, false, RpcServer.getRequestUser().orElse(null)); @@ -2562,11 +2180,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS } } - @Override - public RpcServerInterface getRpcServer() { - return rpcServices.rpcServer; - } - @InterfaceAudience.Private public RSRpcServices getRSRpcServices() { return rpcServices; @@ -2631,20 +2244,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS stop(reason, true, null); } - /** - * Sets the abort state if not already set. - * @return True if abortRequested set to True successfully, false if an abort is already in - * progress. - */ - protected boolean setAbortRequested() { - return abortRequested.compareAndSet(false, true); - } - - @Override - public boolean isAborted() { - return abortRequested.get(); - } - /* * Simulate a kill -9 of this server. Exits w/o closing regions or cleaninup * logs but it does close socket in case want to bring up server on old @@ -2676,31 +2275,13 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS } } - protected final void shutdownChore(ScheduledChore chore) { - if (chore != null) { - chore.shutdown(); - } - } /** * Wait on all threads to finish. Presumption is that all closes and stops * have already been called. 
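Among the members dropped from HRegionServer above is the setAbortRequested()/isAborted() pair, presumably inherited from the base class now. It used an AtomicBoolean compare-and-set so that only the first abort request wins and later callers can tell an abort is already in progress. A self-contained sketch of that idiom:

import java.util.concurrent.atomic.AtomicBoolean;

public class AbortOnce {
  private final AtomicBoolean abortRequested = new AtomicBoolean(false);

  /** @return true only for the caller that flips the flag, false if an abort is already in progress. */
  boolean setAbortRequested() {
    return abortRequested.compareAndSet(false, true);
  }

  boolean isAborted() {
    return abortRequested.get();
  }

  public static void main(String[] args) {
    AbortOnce server = new AbortOnce();
    System.out.println(server.setAbortRequested()); // true: first request starts the abort
    System.out.println(server.setAbortRequested()); // false: already aborting
    System.out.println(server.isAborted());         // true
  }
}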
*/ protected void stopServiceThreads() { // clean up the scheduled chores - if (this.choreService != null) { - shutdownChore(nonceManagerChore); - shutdownChore(compactionChecker); - shutdownChore(periodicFlusher); - shutdownChore(healthCheckChore); - shutdownChore(executorStatusChore); - shutdownChore(storefileRefresher); - shutdownChore(fsUtilizationChore); - shutdownChore(slowLogTableOpsChore); - // cancel the remaining scheduled chores (in case we missed out any) - // TODO: cancel will not cleanup the chores, so we need make sure we do not miss any - choreService.shutdown(); - } - + stopChoreService(); if (this.cacheFlusher != null) { this.cacheFlusher.join(); } @@ -2710,9 +2291,7 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS if (this.compactSplitThread != null) { this.compactSplitThread.join(); } - if (this.executorService != null) { - this.executorService.shutdown(); - } + stopExecutorService(); if (sameReplicationSourceAndSink && this.replicationSourceHandler != null) { this.replicationSourceHandler.stopReplicationService(); } else { @@ -2790,15 +2369,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS } continue; } - - // If we are on the active master, use the shortcut - if (this instanceof HMaster && sn.equals(getServerName())) { - // Wrap the shortcut in a class providing our version to the calls where it's relevant. - // Normally, RpcServer-based threadlocals do that. - intRssStub = new MasterRpcServicesVersionWrapper(((HMaster)this).getMasterRpcServices()); - intLockStub = ((HMaster)this).getMasterRpcServices(); - break; - } try { BlockingRpcChannel channel = this.rpcClient.createBlockingRpcChannel(sn, userProvider.getCurrent(), @@ -2860,10 +2430,10 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS rpcServices.rpcFullScanRequestCount.reset(); rpcServices.rpcMultiRequestCount.reset(); rpcServices.rpcMutateRequestCount.reset(); - LOG.info("reportForDuty to master=" + masterServerName + " with isa=" - + rpcServices.isa + ", startcode=" + this.startcode); + LOG.info("reportForDuty to master=" + masterServerName + " with port=" + + rpcServices.getSocketAddress().getPort() + ", startcode=" + this.startcode); long now = EnvironmentEdgeManager.currentTime(); - int port = rpcServices.isa.getPort(); + int port = rpcServices.getSocketAddress().getPort(); RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder(); if (!StringUtils.isBlank(useThisHostnameInstead)) { request.setUseThisHostnameInstead(useThisHostnameInstead); @@ -2958,29 +2528,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS } } - /** @return the info server */ - public InfoServer getInfoServer() { - return infoServer; - } - - /** - * @return true if a stop has been requested. 
- */ - @Override - public boolean isStopped() { - return this.stopped; - } - - @Override - public boolean isStopping() { - return this.stopping; - } - - @Override - public Configuration getConfiguration() { - return conf; - } - protected Map getOnlineRegions() { return this.onlineRegions; } @@ -3040,13 +2587,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS return sortedRegions; } - /** - * @return time stamp in millis of when this region server was started - */ - public long getStartcode() { - return this.startcode; - } - /** @return reference to FlushRequester */ @Override public FlushRequester getFlushRequester() { @@ -3063,18 +2603,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS return leaseManager; } - /** - * @return Return the rootDir. - */ - protected Path getDataRootDir() { - return dataRootDir; - } - - @Override - public FileSystem getFileSystem() { - return dataFs; - } - /** * @return {@code true} when the data file system is available, {@code false} otherwise. */ @@ -3082,40 +2610,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS return this.dataFsOk; } - /** - * @return Return the walRootDir. - */ - public Path getWALRootDir() { - return walRootDir; - } - - /** - * @return Return the walFs. - */ - public FileSystem getWALFileSystem() { - return walFs; - } - - @Override - public String toString() { - return getServerName().toString(); - } - - @Override - public ZKWatcher getZooKeeper() { - return zooKeeper; - } - - @Override - public CoordinatedStateManager getCoordinatedStateManager() { - return csm; - } - - @Override - public ServerName getServerName() { - return serverName; - } - public RegionServerCoprocessorHost getRegionServerCoprocessorHost(){ return this.rsHost; } @@ -3125,16 +2619,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS return this.regionsInTransitionInRS; } - @Override - public ExecutorService getExecutorService() { - return executorService; - } - - @Override - public ChoreService getChoreService() { - return choreService; - } - @Override public RegionServerRpcQuotaManager getRegionServerRpcQuotaManager() { return rsQuotaManager; @@ -3689,42 +3173,13 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS return Optional.ofNullable(this.mobFileCache); } - @Override - public AccessChecker getAccessChecker() { - return rpcServices.getAccessChecker(); - } - - @Override - public ZKPermissionWatcher getZKPermissionWatcher() { - return rpcServices.getZkPermissionWatcher(); - } - /** * @return : Returns the ConfigurationManager object for testing purposes. */ - @InterfaceAudience.Private ConfigurationManager getConfigurationManager() { return configurationManager; } - /** - * @return Return table descriptors implementation. - */ - @Override - public TableDescriptors getTableDescriptors() { - return this.tableDescriptors; - } - - /** - * Reload the configuration from disk. - */ - void updateConfiguration() { - LOG.info("Reloading the configuration from disk."); - // Reload the configuration from disk. 
- conf.reloadConfiguration(); - configurationManager.notifyAllObservers(conf); - } - CacheEvictionStats clearRegionBlockCache(Region region) { long evictedBlocks = 0; @@ -3864,16 +3319,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS return true; } - public NettyEventLoopGroupConfig getEventLoopGroupConfig() { - return eventLoopGroupConfig; - } - - @Override - public Connection createConnection(Configuration conf) throws IOException { - User user = UserProvider.instantiate(conf).getCurrent(); - return ConnectionFactory.createConnection(conf, null, user); - } - void executeProcedure(long procId, RSProcedureCallable callable) { executorService.submit(new RSProcedureHandler(this, procId, callable)); } @@ -3947,10 +3392,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS submittedRegionProcedures.remove(procId); } - public boolean isShutDown() { - return shutDown; - } - /** * Force to terminate region server when abort timeout. */ @@ -3969,11 +3410,6 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS } } - @Override - public AsyncClusterConnection getAsyncClusterConnection() { - return asyncClusterConnection; - } - @InterfaceAudience.Private public CompactedHFilesDischarger getCompactedHFilesDischarger() { return compactedFileDischarger; @@ -4008,9 +3444,33 @@ public class HRegionServer extends Thread implements RegionServerServices, LastS return metaRegionLocationCache.getMetaRegionLocations(); } - @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") - public MetaRegionLocationCache getMetaRegionLocationCache() { - return metaRegionLocationCache; + @Override + protected NamedQueueRecorder createNamedQueueRecord() { + final boolean isOnlineLogProviderEnabled = conf.getBoolean( + HConstants.SLOW_LOG_BUFFER_ENABLED_KEY, HConstants.DEFAULT_ONLINE_LOG_PROVIDER_ENABLED); + if (isOnlineLogProviderEnabled) { + return NamedQueueRecorder.getInstance(conf); + } else { + return null; + } + } + + @Override + protected boolean clusterMode() { + // this method will be called in the constructor of super class, so we can not return masterless + // directly here, as it will always be false. 
+ return !conf.getBoolean(MASTERLESS_CONFIG_NAME, false); + } + + @Override + protected void stopChores() { + shutdownChore(nonceManagerChore); + shutdownChore(compactionChecker); + shutdownChore(periodicFlusher); + shutdownChore(healthCheckChore); + shutdownChore(executorStatusChore); + shutdownChore(storefileRefresher); + shutdownChore(fsUtilizationChore); + shutdownChore(slowLogTableOpsChore); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java index 3da069dd833..c33807a683d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java @@ -225,7 +225,7 @@ class MetricsRegionServerWrapperImpl @Override public long getTotalRequestCount() { - return regionServer.rpcServices.requestCount.sum(); + return regionServer.getRpcServices().requestCount.sum(); } @Override @@ -479,27 +479,27 @@ class MetricsRegionServerWrapperImpl @Override public long getRpcGetRequestsCount() { - return regionServer.rpcServices.rpcGetRequestCount.sum(); + return regionServer.getRpcServices().rpcGetRequestCount.sum(); } @Override public long getRpcScanRequestsCount() { - return regionServer.rpcServices.rpcScanRequestCount.sum(); + return regionServer.getRpcServices().rpcScanRequestCount.sum(); } @Override public long getRpcFullScanRequestsCount() { - return regionServer.rpcServices.rpcFullScanRequestCount.sum(); + return regionServer.getRpcServices().rpcFullScanRequestCount.sum(); } @Override public long getRpcMultiRequestsCount() { - return regionServer.rpcServices.rpcMultiRequestCount.sum(); + return regionServer.getRpcServices().rpcMultiRequestCount.sum(); } @Override public long getRpcMutateRequestsCount() { - return regionServer.rpcServices.rpcMutateRequestCount.sum(); + return regionServer.getRpcServices().rpcMutateRequestCount.sum(); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index a684385d3fe..0115cfba674 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -21,8 +21,6 @@ import com.google.errorprone.annotations.RestrictedApi; import java.io.FileNotFoundException; import java.io.IOException; import java.io.UncheckedIOException; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; import java.net.BindException; import java.net.InetAddress; import java.net.InetSocketAddress; @@ -36,12 +34,10 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.NavigableMap; -import java.util.Optional; import java.util.Set; import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -60,6 +56,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.DroppedSnapshotException; import org.apache.hadoop.hbase.HBaseIOException; +import 
org.apache.hadoop.hbase.HBaseRpcServicesBase; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MultiActionResultTooLarge; import org.apache.hadoop.hbase.NotServingRegionException; @@ -72,7 +69,6 @@ import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.CheckAndMutate; import org.apache.hadoop.hbase.client.CheckAndMutateResult; -import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; @@ -86,35 +82,24 @@ import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.VersionInfoUtil; -import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException; import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException; import org.apache.hadoop.hbase.exceptions.ScannerResetException; import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; import org.apache.hadoop.hbase.io.ByteBuffAllocator; import org.apache.hadoop.hbase.io.hfile.BlockCache; -import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.ipc.PriorityFunction; import org.apache.hadoop.hbase.ipc.QosPriority; import org.apache.hadoop.hbase.ipc.RpcCall; import org.apache.hadoop.hbase.ipc.RpcCallContext; import org.apache.hadoop.hbase.ipc.RpcCallback; -import org.apache.hadoop.hbase.ipc.RpcScheduler; import org.apache.hadoop.hbase.ipc.RpcServer; import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; import org.apache.hadoop.hbase.ipc.RpcServerFactory; import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.ipc.ServerRpcController; -import org.apache.hadoop.hbase.log.HBaseMarkers; -import org.apache.hadoop.hbase.master.HMaster; -import org.apache.hadoop.hbase.master.MasterRpcServices; -import org.apache.hadoop.hbase.namequeues.NamedQueuePayload; -import org.apache.hadoop.hbase.namequeues.NamedQueueRecorder; -import org.apache.hadoop.hbase.namequeues.RpcLogDetails; -import org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest; -import org.apache.hadoop.hbase.namequeues.response.NamedQueueGetResponse; import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.procedure2.RSProcedureCallable; import org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement; @@ -138,13 +123,10 @@ import org.apache.hadoop.hbase.replication.ReplicationUtils; import org.apache.hadoop.hbase.replication.regionserver.RejectReplicationRequestStateChecker; import org.apache.hadoop.hbase.replication.regionserver.RejectRequestsFromClientStateChecker; import org.apache.hadoop.hbase.security.Superusers; -import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.security.access.AccessChecker; -import org.apache.hadoop.hbase.security.access.NoopAccessChecker; import org.apache.hadoop.hbase.security.access.Permission; -import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.DNS; +import org.apache.hadoop.hbase.util.DNS.ServerType; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import 
org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; @@ -155,7 +137,6 @@ import org.apache.hadoop.hbase.wal.WALSplitUtil; import org.apache.hadoop.hbase.wal.WALSplitUtil.MutationReplay; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -179,8 +160,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompac import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearRegionBlockCacheResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearSlowLogResponseRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearSlowLogResponses; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; @@ -210,12 +189,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWA import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SlowLogResponseRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SlowLogResponses; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry; @@ -250,7 +225,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanReques import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad; -import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameInt64Pair; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; @@ -259,20 +233,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MapReduceProtos.ScanMet import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.TableQuotaSnapshot; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader; import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.ClientMetaService; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetActiveMasterRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetActiveMasterResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetBootstrapNodesRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetBootstrapNodesResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetClusterIdRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetClusterIdResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMastersRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMastersResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMastersResponseEntry; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaRegionLocationsRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RegistryProtos.GetMetaRegionLocationsResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog.SlowLogPayload; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor; @@ -282,20 +243,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDe * Implements the regionserver RPC services. */ @InterfaceAudience.Private -@SuppressWarnings("deprecation") -public class RSRpcServices implements HBaseRPCErrorHandler, - AdminService.BlockingInterface, ClientService.BlockingInterface, - ClientMetaService.BlockingInterface, PriorityFunction, ConfigurationObserver { +public class RSRpcServices extends HBaseRpcServicesBase + implements ClientService.BlockingInterface { + private static final Logger LOG = LoggerFactory.getLogger(RSRpcServices.class); /** RPC scheduler to use for the region server. */ public static final String REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS = "hbase.region.server.rpc.scheduler.factory.class"; - /** RPC scheduler to use for the master. */ - public static final String MASTER_RPC_SCHEDULER_FACTORY_CLASS = - "hbase.master.rpc.scheduler.factory.class"; - /** * Minimum allowable time limit delta (in milliseconds) that can be enforced during scans. This * configuration exists to prevent the scenario where a time limit is specified to be so @@ -320,10 +276,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, */ private static final boolean DEFAULT_REJECT_BATCH_ROWS_OVER_THRESHOLD = false; - public static final String CLIENT_BOOTSTRAP_NODE_LIMIT = "hbase.client.bootstrap.node.limit"; - - public static final int DEFAULT_CLIENT_BOOTSTRAP_NODE_LIMIT = 10; - // Request counter. (Includes requests that are not serviced by regions.) 
// Count only once for requests with multiple actions like multi/caching-scan/replayBatch final LongAdder requestCount = new LongAdder(); @@ -343,16 +295,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // Request counter for rpc mutate final LongAdder rpcMutateRequestCount = new LongAdder(); - // Server to handle client requests. - final RpcServerInterface rpcServer; - final InetSocketAddress isa; - - protected final HRegionServer regionServer; private final long maxScannerResultSize; - // The reference to the priority extraction function - private final PriorityFunction priority; - private ScannerIdGenerator scannerIdGenerator; private final ConcurrentMap scanners = new ConcurrentHashMap<>(); // Hold the name of a closed scanner for a while. This is used to keep compatible for old clients @@ -386,9 +330,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, final AtomicBoolean clearCompactionQueues = new AtomicBoolean(false); - private AccessChecker accessChecker; - private ZKPermissionWatcher zkPermissionWatcher; - /** * Services launched in RSRpcServices. By default they are on but you can use the below * booleans to selectively enable/disable either Admin or Client Service (Rare is the case @@ -439,7 +380,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // Rpc call and we are at end of the call now. Time to add it back. if (scanners.containsKey(scannerName)) { if (lease != null) { - regionServer.getLeaseManager().addLease(lease); + server.getLeaseManager().addLease(lease); } } } @@ -536,7 +477,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, RegionScanner s = rsh.s; HRegion region = null; try { - region = regionServer.getRegion(s.getRegionInfo().getRegionName()); + region = server.getRegion(s.getRegionInfo().getRegionName()); if (region != null && region.getCoprocessorHost() != null) { region.getCoprocessorHost().preScannerClose(s); } @@ -629,7 +570,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, int countOfCompleteMutation = 0; try { if (!region.getRegionInfo().isMetaRegion()) { - regionServer.getMemStoreFlusher().reclaimMemStoreMemory(); + server.getMemStoreFlusher().reclaimMemStoreMemory(); } List mutations = new ArrayList<>(); long nonce = HConstants.NO_NONCE; @@ -717,8 +658,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler, quota.addMutation(append); long nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE; Result r = region.append(append, nonceGroup, nonce); - if (regionServer.getMetrics() != null) { - regionServer.getMetrics().updateAppend(region.getTableDescriptor().getTableName(), + if (server.getMetrics() != null) { + server.getMetrics().updateAppend(region.getTableDescriptor().getTableName(), EnvironmentEdgeManager.currentTime() - before); } return r == null ? Result.EMPTY_RESULT : r; @@ -738,7 +679,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, quota.addMutation(increment); long nonce = mutation.hasNonce() ? 
mutation.getNonce() : HConstants.NO_NONCE; Result r = region.increment(increment, nonceGroup, nonce); - final MetricsRegionServer metricsRegionServer = regionServer.getMetrics(); + final MetricsRegionServer metricsRegionServer = server.getMetrics(); if (metricsRegionServer != null) { metricsRegionServer.updateIncrement(region.getTableDescriptor().getTableName(), EnvironmentEdgeManager.currentTime() - before); @@ -831,7 +772,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, r = region.get(get); } } finally { - final MetricsRegionServer metricsRegionServer = regionServer.getMetrics(); + final MetricsRegionServer metricsRegionServer = server.getMetrics(); if (metricsRegionServer != null) { metricsRegionServer.updateGet( region.getTableDescriptor().getTableName(), @@ -1030,7 +971,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } if (!region.getRegionInfo().isMetaRegion()) { - regionServer.getMemStoreFlusher().reclaimMemStoreMemory(); + server.getMemStoreFlusher().reclaimMemStoreMemory(); } // HBASE-17924 @@ -1127,7 +1068,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, private void updateMutationMetrics(HRegion region, long starttime, boolean batchContainsPuts, boolean batchContainsDelete) { - final MetricsRegionServer metricsRegionServer = regionServer.getMetrics(); + final MetricsRegionServer metricsRegionServer = server.getMetrics(); if (metricsRegionServer != null) { long after = EnvironmentEdgeManager.currentTime(); if (batchContainsPuts) { @@ -1200,7 +1141,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } requestCount.increment(); if (!region.getRegionInfo().isMetaRegion()) { - regionServer.getMemStoreFlusher().reclaimMemStoreMemory(); + server.getMemStoreFlusher().reclaimMemStoreMemory(); } return region.batchReplay(mutations.toArray( new MutationReplay[mutations.size()]), replaySeqId); @@ -1223,54 +1164,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // Directly invoked only for testing public RSRpcServices(final HRegionServer rs) throws IOException { + super(rs, rs.getProcessName()); final Configuration conf = rs.getConfiguration(); - regionServer = rs; rowSizeWarnThreshold = conf.getInt( HConstants.BATCH_ROWS_THRESHOLD_NAME, HConstants.BATCH_ROWS_THRESHOLD_DEFAULT); rejectRowsWithSizeOverThreshold = conf.getBoolean(REJECT_BATCH_ROWS_OVER_THRESHOLD, DEFAULT_REJECT_BATCH_ROWS_OVER_THRESHOLD); - - final RpcSchedulerFactory rpcSchedulerFactory; - try { - rpcSchedulerFactory = getRpcSchedulerFactoryClass().asSubclass(RpcSchedulerFactory.class) - .getDeclaredConstructor().newInstance(); - } catch (NoSuchMethodException | InvocationTargetException | - InstantiationException | IllegalAccessException e) { - throw new IllegalArgumentException(e); - } - // Server to handle client requests. - final InetSocketAddress initialIsa; - final InetSocketAddress bindAddress; - if(this instanceof MasterRpcServices) { - String hostname = DNS.getHostname(conf, DNS.ServerType.MASTER); - int port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT); - // Creation of a HSA will force a resolve. - initialIsa = new InetSocketAddress(hostname, port); - bindAddress = new InetSocketAddress(conf.get("hbase.master.ipc.address", hostname), port); - } else { - String hostname = DNS.getHostname(conf, DNS.ServerType.REGIONSERVER); - int port = conf.getInt(HConstants.REGIONSERVER_PORT, - HConstants.DEFAULT_REGIONSERVER_PORT); - // Creation of a HSA will force a resolve. 
- initialIsa = new InetSocketAddress(hostname, port); - bindAddress = - new InetSocketAddress(conf.get("hbase.regionserver.ipc.address", hostname), port); - } - if (initialIsa.getAddress() == null) { - throw new IllegalArgumentException("Failed resolve of " + initialIsa); - } - priority = createPriority(); - // Using Address means we don't get the IP too. Shorten it more even to just the host name - // w/o the domain. - final String name = rs.getProcessName() + "/" + - Address.fromParts(initialIsa.getHostName(), initialIsa.getPort()).toStringWithoutDomain(); - // Set how many times to retry talking to another server over Connection. - ConnectionUtils.setServerSideHConnectionRetriesConfig(conf, name, LOG); - rpcServer = createRpcServer(rs, rpcSchedulerFactory, bindAddress, name); - rpcServer.setRsRpcServices(this); - if (!(rs instanceof HMaster)) { - rpcServer.setNamedQueueRecorder(rs.getNamedQueueRecorder()); - } scannerLeaseTimeoutPeriod = conf.getInt( HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD); @@ -1283,21 +1182,44 @@ public class RSRpcServices implements HBaseRPCErrorHandler, minimumScanTimeLimitDelta = conf.getLong( REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA, DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA); - - final InetSocketAddress address = rpcServer.getListenerAddress(); - if (address == null) { - throw new IOException("Listener channel is closed"); - } - // Set our address, however we need the final port that was given to rpcServer - isa = new InetSocketAddress(initialIsa.getHostName(), address.getPort()); - rpcServer.setErrorHandler(this); - rs.setName(name); - + rpcServer.setNamedQueueRecorder(rs.getNamedQueueRecorder()); closedScanners = CacheBuilder.newBuilder() .expireAfterAccess(scannerLeaseTimeoutPeriod, TimeUnit.MILLISECONDS).build(); } - protected RpcServerInterface createRpcServer( + @Override + protected boolean defaultReservoirEnabled() { + return true; + } + + @Override + protected ServerType getDNSServerType() { + return DNS.ServerType.REGIONSERVER; + } + + @Override + protected String getHostname(Configuration conf, String defaultHostname) { + return conf.get("hbase.regionserver.ipc.address", defaultHostname); + } + + @Override + protected String getPortConfigName() { + return HConstants.REGIONSERVER_PORT; + } + + @Override + protected int getDefaultPort() { + return HConstants.DEFAULT_REGIONSERVER_PORT; + } + + @Override + protected Class getRpcSchedulerFactoryClass(Configuration conf) { + return conf.getClass(REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS, + SimpleRpcSchedulerFactory.class); + } + + protected RpcServerInterface + createRpcServer( final Server server, final RpcSchedulerFactory rpcSchedulerFactory, final InetSocketAddress bindAddress, @@ -1317,28 +1239,15 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } protected Class getRpcSchedulerFactoryClass() { - final Configuration conf = regionServer.getConfiguration(); + final Configuration conf = server.getConfiguration(); return conf.getClass(REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS, SimpleRpcSchedulerFactory.class); } - @Override - public void onConfigurationChange(Configuration newConf) { - if (rpcServer instanceof ConfigurationObserver) { - ((ConfigurationObserver)rpcServer).onConfigurationChange(newConf); - } - } - protected PriorityFunction createPriority() { return new RSAnnotationReadingPriorityFunction(this); } - protected void requirePermission(String request, Permission.Action perm) throws IOException { - if 
(accessChecker != null) { - accessChecker.requirePermission(RpcServer.getRequestUser().orElse(null), request, null, perm); - } - } - public int getScannersCount() { return scanners.size(); } @@ -1471,7 +1380,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, private RegionScannerHolder addScanner(String scannerName, RegionScanner s, Shipper shipper, HRegion r, boolean needCursor, boolean fullRegionScan) throws LeaseStillHeldException { - Lease lease = regionServer.getLeaseManager().createLease( + Lease lease = server.getLeaseManager().createLease( scannerName, this.scannerLeaseTimeoutPeriod, new ScannerListener(scannerName)); RpcCallback shippedCallback = new RegionScannerShippedCallBack(scannerName, shipper, lease); RpcCallback closeCallback = s instanceof RpcCallback? @@ -1508,7 +1417,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, */ public HRegion getRegion( final RegionSpecifier regionSpecifier) throws IOException { - return regionServer.getRegion(regionSpecifier.getValue().toByteArray()); + return server.getRegion(regionSpecifier.getValue().toByteArray()); } /** @@ -1524,7 +1433,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, List regions = Lists.newArrayListWithCapacity(regionSpecifiers.size()); for (RegionSpecifier regionSpecifier: regionSpecifiers) { try { - regions.add(regionServer.getRegion(regionSpecifier.getValue().toByteArray())); + regions.add(server.getRegion(regionSpecifier.getValue().toByteArray())); } catch (NotServingRegionException e) { stats.addException(regionSpecifier.getValue().toByteArray(), e); } @@ -1532,45 +1441,26 @@ public class RSRpcServices implements HBaseRPCErrorHandler, return regions; } - public PriorityFunction getPriority() { + PriorityFunction getPriority() { return priority; } - public Configuration getConfiguration() { - return regionServer.getConfiguration(); - } - private RegionServerRpcQuotaManager getRpcQuotaManager() { - return regionServer.getRegionServerRpcQuotaManager(); + return server.getRegionServerRpcQuotaManager(); } private RegionServerSpaceQuotaManager getSpaceQuotaManager() { - return regionServer.getRegionServerSpaceQuotaManager(); + return server.getRegionServerSpaceQuotaManager(); } void start(ZKWatcher zkWatcher) { - if (AccessChecker.isAuthorizationSupported(getConfiguration())) { - accessChecker = new AccessChecker(getConfiguration()); - } else { - accessChecker = new NoopAccessChecker(getConfiguration()); - } - zkPermissionWatcher = - new ZKPermissionWatcher(zkWatcher, accessChecker.getAuthManager(), getConfiguration()); - try { - zkPermissionWatcher.start(); - } catch (KeeperException e) { - LOG.error("ZooKeeper permission watcher initialization failed", e); - } - this.scannerIdGenerator = new ScannerIdGenerator(this.regionServer.serverName); - rpcServer.start(); + this.scannerIdGenerator = new ScannerIdGenerator(this.server.getServerName()); + internalStart(zkWatcher); } void stop() { - if (zkPermissionWatcher != null) { - zkPermissionWatcher.close(); - } closeAllScanners(); - rpcServer.stop(); + internalStop(); } /** @@ -1578,17 +1468,17 @@ public class RSRpcServices implements HBaseRPCErrorHandler, */ // TODO : Rename this and HMaster#checkInitialized to isRunning() (or a better name). 
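The per-server-type hooks overridden just above (getDNSServerType, getHostname, getPortConfigName, getDefaultPort, getRpcSchedulerFactoryClass) take over the work of the instanceof-MasterRpcServices branch deleted from the constructor. A minimal sketch of how a shared base-class constructor can combine those hooks to resolve the RPC bind address, assuming it follows the same steps as the deleted code; the actual HBaseRpcServicesBase wiring is not shown in this hunk:

    // Given the owning server's Configuration conf:
    String hostname = DNS.getHostname(conf, getDNSServerType());
    int port = conf.getInt(getPortConfigName(), getDefaultPort());
    // Creating the InetSocketAddress forces a resolve, as in the deleted code.
    InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
    InetSocketAddress bindAddress = new InetSocketAddress(getHostname(conf, hostname), port);
    if (initialIsa.getAddress() == null) {
      throw new IllegalArgumentException("Failed resolve of " + initialIsa);
    }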
protected void checkOpen() throws IOException { - if (regionServer.isAborted()) { - throw new RegionServerAbortedException("Server " + regionServer.serverName + " aborting"); + if (server.isAborted()) { + throw new RegionServerAbortedException("Server " + server.getServerName() + " aborting"); } - if (regionServer.isStopped()) { - throw new RegionServerStoppedException("Server " + regionServer.serverName + " stopping"); + if (server.isStopped()) { + throw new RegionServerStoppedException("Server " + server.getServerName() + " stopping"); } - if (!regionServer.isDataFileSystemOk()) { + if (!server.isDataFileSystemOk()) { throw new RegionServerStoppedException("File system not available"); } - if (!regionServer.isOnline()) { - throw new ServerNotRunningYetException("Server " + regionServer.serverName + if (!server.isOnline()) { + throw new ServerNotRunningYetException("Server " + server.getServerName() + " is not running yet"); } } @@ -1622,52 +1512,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler, return new ImmutableList.Builder().addAll(bssi).build(); } - public InetSocketAddress getSocketAddress() { - return isa; - } - - @Override - public int getPriority(RequestHeader header, Message param, User user) { - return priority.getPriority(header, param, user); - } - - @Override - public long getDeadline(RequestHeader header, Message param) { - return priority.getDeadline(header, param); - } - - /* - * Check if an OOME and, if so, abort immediately to avoid creating more objects. - * - * @param e - * - * @return True if we OOME'd and are aborting. - */ - @Override - public boolean checkOOME(final Throwable e) { - return exitIfOOME(e); - } - - public static boolean exitIfOOME(final Throwable e ){ - boolean stop = false; - try { - if (e instanceof OutOfMemoryError - || (e.getCause() != null && e.getCause() instanceof OutOfMemoryError) - || (e.getMessage() != null && e.getMessage().contains( - "java.lang.OutOfMemoryError"))) { - stop = true; - LOG.error(HBaseMarkers.FATAL, "Run out of memory; " - + RSRpcServices.class.getSimpleName() + " will abort itself immediately", - e); - } - } finally { - if (stop) { - Runtime.getRuntime().halt(1); - } - } - return stop; - } - /** * Close a region on the region server. * @@ -1693,7 +1537,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } else { LOG.info("Close " + encodedRegionName + ", moving to " + sn); } - boolean closed = regionServer.closeRegion(encodedRegionName, false, sn); + boolean closed = server.closeRegion(encodedRegionName, false, sn); CloseRegionResponse.Builder builder = CloseRegionResponse.newBuilder().setClosed(closed); return builder.build(); } catch (IOException ie) { @@ -1720,7 +1564,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // and a quota policy is enforced that disables compactions. 
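The checkOOME/exitIfOOME pair removed in the hunk above is superseded by the new OOMEChecker utility added near the end of this patch. A hedged sketch of how an HBaseRPCErrorHandler implementation can now delegate to it, mirroring the call style ReplicationSource switches to below; the exact override in the new base class is not shown here:

    @Override
    public boolean checkOOME(Throwable e) {
      // The service name only labels the FATAL log line before the process halts.
      return OOMEChecker.exitIfOOME(e, getClass().getSimpleName());
    }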
if (QuotaUtil.isQuotaEnabled(getConfiguration()) && !Superusers.isSuperUser(RpcServer.getRequestUser().orElse(null)) && - this.regionServer.getRegionServerSpaceQuotaManager() + this.server.getRegionServerSpaceQuotaManager() .areCompactionsDisabled(region.getTableDescriptor().getTableName())) { throw new DoNotRetryIOException( "Compactions on this region are " + "disabled due to a space quota violation."); @@ -1752,7 +1596,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, public CompactionSwitchResponse compactionSwitch(RpcController controller, CompactionSwitchRequest request) throws ServiceException { rpcPreCheck("compactionSwitch"); - final CompactSplit compactSplitThread = regionServer.getCompactSplitThread(); + final CompactSplit compactSplitThread = server.getCompactSplitThread(); requestCount.increment(); boolean prevState = compactSplitThread.isCompactionsEnabled(); CompactionSwitchResponse response = @@ -1801,7 +1645,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } boolean compactionNeeded = flushResult.isCompactionNeeded(); if (compactionNeeded) { - regionServer.getCompactSplitThread().requestSystemCompaction(region, + server.getCompactSplitThread().requestSystemCompaction(region, "Compaction through user triggered flush"); } builder.setFlushed(flushResult.isFlushSucceeded()); @@ -1814,7 +1658,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // section, we get a DroppedSnapshotException and a replay of wal // is required. Currently the only way to do this is a restart of // the server. - regionServer.abort("Replay of WAL required. Forcing server shutdown", ex); + server.abort("Replay of WAL required. Forcing server shutdown", ex); throw new ServiceException(ex); } catch (IOException ie) { throw new ServiceException(ie); @@ -1828,7 +1672,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, try { checkOpen(); requestCount.increment(); - Map onlineRegions = regionServer.getOnlineRegions(); + Map onlineRegions = server.getOnlineRegions(); List list = new ArrayList<>(onlineRegions.size()); for (HRegion region: onlineRegions.values()) { list.add(region.getRegionInfo()); @@ -1888,9 +1732,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler, List regions; if (request.hasTableName()) { TableName tableName = ProtobufUtil.toTableName(request.getTableName()); - regions = regionServer.getRegions(tableName); + regions = server.getRegions(tableName); } else { - regions = regionServer.getRegions(); + regions = server.getRegions(); } List rLoads = new ArrayList<>(regions.size()); RegionLoad.Builder regionLoadBuilder = ClusterStatusProtos.RegionLoad.newBuilder(); @@ -1898,7 +1742,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, try { for (HRegion region : regions) { - rLoads.add(regionServer.createRegionLoad(region, regionLoadBuilder, regionSpecifier)); + rLoads.add(server.createRegionLoad(region, regionLoadBuilder, regionSpecifier)); } } catch (IOException e) { throw new ServiceException(e); @@ -1917,10 +1761,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler, ClearCompactionQueuesResponse.Builder respBuilder = ClearCompactionQueuesResponse.newBuilder(); requestCount.increment(); if (clearCompactionQueues.compareAndSet(false,true)) { - final CompactSplit compactSplitThread = regionServer.getCompactSplitThread(); + final CompactSplit compactSplitThread = server.getCompactSplitThread(); try { checkOpen(); - regionServer.getRegionServerCoprocessorHost().preClearCompactionQueues(); + 
server.getRegionServerCoprocessorHost().preClearCompactionQueues(); for (String queueName : request.getQueueNameList()) { LOG.debug("clear " + queueName + " compaction queue"); switch (queueName) { @@ -1935,7 +1779,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, throw new IOException("Unknown queue name " + queueName); } } - regionServer.getRegionServerCoprocessorHost().postClearCompactionQueues(); + server.getRegionServerCoprocessorHost().postClearCompactionQueues(); } catch (IOException ie) { throw new ServiceException(ie); } finally { @@ -1964,8 +1808,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler, throw new ServiceException(ie); } requestCount.increment(); - int infoPort = regionServer.infoServer != null ? regionServer.infoServer.getPort() : -1; - return ResponseConverter.buildGetServerInfoResponse(regionServer.serverName, infoPort); + int infoPort = server.getInfoServer() != null ? server.getInfoServer().getPort() : -1; + return ResponseConverter.buildGetServerInfoResponse(server.getServerName(), infoPort); } @Override @@ -2014,10 +1858,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler, private void throwOnWrongStartCode(long serverStartCode) throws ServiceException { // check that we are the same server that this RPC is intended for. - if (regionServer.serverName.getStartcode() != serverStartCode) { + if (server.getServerName().getStartcode() != serverStartCode) { throw new ServiceException(new DoNotRetryIOException( "This RPC was intended for a " + "different server with startCode: " + serverStartCode + - ", this server is: " + regionServer.serverName)); + ", this server is: " + server.getServerName())); } } @@ -2082,14 +1926,14 @@ public class RSRpcServices implements HBaseRPCErrorHandler, throw new ServiceException(ie); } // We are assigning meta, wait a little for regionserver to finish initialization. - int timeout = regionServer.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, + int timeout = server.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT) >> 2; // Quarter of RPC timeout long endTime = EnvironmentEdgeManager.currentTime() + timeout; - synchronized (regionServer.online) { + synchronized (server.online) { try { while (EnvironmentEdgeManager.currentTime() <= endTime - && !regionServer.isStopped() && !regionServer.isOnline()) { - regionServer.online.wait(regionServer.msgInterval); + && !server.isStopped() && !server.isOnline()) { + server.online.wait(server.getMsgInterval()); } checkOpen(); } catch (InterruptedException t) { @@ -2109,31 +1953,31 @@ public class RSRpcServices implements HBaseRPCErrorHandler, try { String encodedName = region.getEncodedName(); byte[] encodedNameBytes = region.getEncodedNameAsBytes(); - final HRegion onlineRegion = regionServer.getRegion(encodedName); + final HRegion onlineRegion = server.getRegion(encodedName); if (onlineRegion != null) { // The region is already online. This should not happen any more. 
String error = "Received OPEN for the region:" + region.getRegionNameAsString() + ", which is already online"; LOG.warn(error); - //regionServer.abort(error); + //server.abort(error); //throw new IOException(error); builder.addOpeningState(RegionOpeningState.OPENED); continue; } LOG.info("Open " + region.getRegionNameAsString()); - final Boolean previous = regionServer.getRegionsInTransitionInRS().putIfAbsent( + final Boolean previous = server.getRegionsInTransitionInRS().putIfAbsent( encodedNameBytes, Boolean.TRUE); if (Boolean.FALSE.equals(previous)) { - if (regionServer.getRegion(encodedName) != null) { + if (server.getRegion(encodedName) != null) { // There is a close in progress. This should not happen any more. String error = "Received OPEN for the region:" + region.getRegionNameAsString() + ", which we are already trying to CLOSE"; - regionServer.abort(error); + server.abort(error); throw new IOException(error); } - regionServer.getRegionsInTransitionInRS().put(encodedNameBytes, Boolean.TRUE); + server.getRegionsInTransitionInRS().put(encodedNameBytes, Boolean.TRUE); } if (Boolean.TRUE.equals(previous)) { @@ -2145,12 +1989,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // We are opening this region. If it moves back and forth for whatever reason, we don't // want to keep returning the stale moved record while we are opening/if we close again. - regionServer.removeFromMovedRegions(region.getEncodedName()); + server.removeFromMovedRegions(region.getEncodedName()); if (previous == null || !previous.booleanValue()) { htd = htds.get(region.getTable()); if (htd == null) { - htd = regionServer.tableDescriptors.get(region.getTable()); + htd = server.getTableDescriptors().get(region.getTable()); htds.put(region.getTable(), htd); } if (htd == null) { @@ -2158,23 +2002,23 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } // If there is no action in progress, we can submit a specific handler. // Need to pass the expected version in the constructor. 
- if (regionServer.executorService == null) { + if (server.getExecutorService() == null) { LOG.info("No executor executorService; skipping open request"); } else { if (region.isMetaRegion()) { - regionServer.executorService.submit(new OpenMetaHandler( - regionServer, regionServer, region, htd, masterSystemTime)); + server.getExecutorService() + .submit(new OpenMetaHandler(server, server, region, htd, masterSystemTime)); } else { if (regionOpenInfo.getFavoredNodesCount() > 0) { - regionServer.updateRegionFavoredNodesMapping(region.getEncodedName(), - regionOpenInfo.getFavoredNodesList()); + server.updateRegionFavoredNodesMapping(region.getEncodedName(), + regionOpenInfo.getFavoredNodesList()); } if (htd.getPriority() >= HConstants.ADMIN_QOS || region.getTable().isSystemTable()) { - regionServer.executorService.submit(new OpenPriorityRegionHandler( - regionServer, regionServer, region, htd, masterSystemTime)); + server.getExecutorService().submit( + new OpenPriorityRegionHandler(server, server, region, htd, masterSystemTime)); } else { - regionServer.executorService.submit(new OpenRegionHandler( - regionServer, regionServer, region, htd, masterSystemTime)); + server.getExecutorService() + .submit(new OpenRegionHandler(server, server, region, htd, masterSystemTime)); } } } @@ -2207,19 +2051,19 @@ public class RSRpcServices implements HBaseRPCErrorHandler, checkOpen(); String encodedName = region.getEncodedName(); byte[] encodedNameBytes = region.getEncodedNameAsBytes(); - final HRegion onlineRegion = regionServer.getRegion(encodedName); + final HRegion onlineRegion = server.getRegion(encodedName); if (onlineRegion != null) { LOG.info("{} is online; skipping warmup", region); return response; } - TableDescriptor htd = regionServer.tableDescriptors.get(region.getTable()); - if (regionServer.getRegionsInTransitionInRS().containsKey(encodedNameBytes)) { + TableDescriptor htd = server.getTableDescriptors().get(region.getTable()); + if (server.getRegionsInTransitionInRS().containsKey(encodedNameBytes)) { LOG.info("{} is in transition; skipping warmup", region); return response; } LOG.info("Warmup {}", region.getRegionNameAsString()); - HRegion.warmupHRegion(region, htd, regionServer.getWAL(region), - regionServer.getConfiguration(), regionServer, null); + HRegion.warmupHRegion(region, htd, server.getWAL(region), + server.getConfiguration(), server, null); } catch (IOException ie) { LOG.error("Failed warmup of {}", region.getRegionNameAsString(), ie); throw new ServiceException(ie); @@ -2250,7 +2094,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, return ReplicateWALEntryResponse.newBuilder().build(); } ByteString regionName = entries.get(0).getKey().getEncodedRegionName(); - HRegion region = regionServer.getRegionByEncodedName(regionName.toStringUtf8()); + HRegion region = server.getRegionByEncodedName(regionName.toStringUtf8()); RegionCoprocessorHost coprocessorHost = ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo()) ? region.getCoprocessorHost() @@ -2267,11 +2111,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler, "regions. First region:" + regionName.toStringUtf8() + " , other region:" + entry.getKey().getEncodedRegionName()); } - if (regionServer.nonceManager != null && isPrimary) { + if (server.nonceManager != null && isPrimary) { long nonceGroup = entry.getKey().hasNonceGroup() ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE; long nonce = entry.getKey().hasNonce() ? 
entry.getKey().getNonce() : HConstants.NO_NONCE; - regionServer.nonceManager.reportOperationFromWal( + server.nonceManager.reportOperationFromWal( nonceGroup, nonce, entry.getKey().getWriteTime()); @@ -2321,7 +2165,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } catch (IOException ie) { throw new ServiceException(ie); } finally { - final MetricsRegionServer metricsRegionServer = regionServer.getMetrics(); + final MetricsRegionServer metricsRegionServer = server.getMetrics(); if (metricsRegionServer != null) { metricsRegionServer.updateReplay(EnvironmentEdgeManager.currentTime() - before); } @@ -2329,7 +2173,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } private void checkShouldRejectReplicationRequest(List entries) throws IOException { - ReplicationSourceService replicationSource = regionServer.getReplicationSourceService(); + ReplicationSourceService replicationSource = server.getReplicationSourceService(); if (replicationSource == null || entries.isEmpty()) { return; } @@ -2357,16 +2201,16 @@ public class RSRpcServices implements HBaseRPCErrorHandler, final ReplicateWALEntryRequest request) throws ServiceException { try { checkOpen(); - if (regionServer.getReplicationSinkService() != null) { + if (server.getReplicationSinkService() != null) { requestCount.increment(); List entries = request.getEntryList(); checkShouldRejectReplicationRequest(entries); CellScanner cellScanner = ((HBaseRpcController) controller).cellScanner(); - regionServer.getRegionServerCoprocessorHost().preReplicateLogEntries(); - regionServer.getReplicationSinkService().replicateLogEntries(entries, cellScanner, + server.getRegionServerCoprocessorHost().preReplicateLogEntries(); + server.getReplicationSinkService().replicateLogEntries(entries, cellScanner, request.getReplicationClusterId(), request.getSourceBaseNamespaceDirPath(), request.getSourceHFileArchiveDirPath()); - regionServer.getRegionServerCoprocessorHost().postReplicateLogEntries(); + server.getRegionServerCoprocessorHost().postReplicateLogEntries(); return ReplicateWALEntryResponse.newBuilder().build(); } else { throw new ServiceException("Replication services are not initialized yet"); @@ -2388,9 +2232,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler, try { checkOpen(); requestCount.increment(); - regionServer.getRegionServerCoprocessorHost().preRollWALWriterRequest(); - regionServer.getWalRoller().requestRollAll(); - regionServer.getRegionServerCoprocessorHost().postRollWALWriterRequest(); + server.getRegionServerCoprocessorHost().preRollWALWriterRequest(); + server.getWalRoller().requestRollAll(); + server.getRegionServerCoprocessorHost().postRollWALWriterRequest(); RollWALWriterResponse.Builder builder = RollWALWriterResponse.newBuilder(); return builder.build(); } catch (IOException ie) { @@ -2413,7 +2257,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, rpcPreCheck("stopServer"); requestCount.increment(); String reason = request.getReason(); - regionServer.stop(reason); + server.stop(reason); return StopServerResponse.newBuilder().build(); } @@ -2426,7 +2270,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, for (UpdateFavoredNodesRequest.RegionUpdateInfo regionUpdateInfo : openInfoList) { RegionInfo hri = ProtobufUtil.toRegionInfo(regionUpdateInfo.getRegion()); if (regionUpdateInfo.getFavoredNodesCount() > 0) { - regionServer.updateRegionFavoredNodesMapping(hri.getEncodedName(), + server.updateRegionFavoredNodesMapping(hri.getEncodedName(), 
regionUpdateInfo.getFavoredNodesList()); } } @@ -2444,10 +2288,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler, final BulkLoadHFileRequest request) throws ServiceException { long start = EnvironmentEdgeManager.currentTime(); List clusterIds = new ArrayList<>(request.getClusterIdsList()); - if(clusterIds.contains(this.regionServer.clusterId)){ + if(clusterIds.contains(this.server.getClusterId())){ return BulkLoadHFileResponse.newBuilder().setLoaded(true).build(); } else { - clusterIds.add(this.regionServer.clusterId); + clusterIds.add(this.server.getClusterId()); } try { checkOpen(); @@ -2473,7 +2317,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } // secure bulk load Map> map = - regionServer.getSecureBulkLoadManager().secureBulkLoadHFiles(region, request, clusterIds); + server.getSecureBulkLoadManager().secureBulkLoadHFiles(region, request, clusterIds); BulkLoadHFileResponse.Builder builder = BulkLoadHFileResponse.newBuilder(); builder.setLoaded(map != null); if (map != null) { @@ -2493,7 +2337,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } catch (IOException ie) { throw new ServiceException(ie); } finally { - final MetricsRegionServer metricsRegionServer = regionServer.getMetrics(); + final MetricsRegionServer metricsRegionServer = server.getMetrics(); if (metricsRegionServer != null) { metricsRegionServer.updateBulkLoad(EnvironmentEdgeManager.currentTime() - start); } @@ -2509,7 +2353,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, HRegion region = getRegion(request.getRegion()); - String bulkToken = regionServer.getSecureBulkLoadManager().prepareBulkLoad(region, request); + String bulkToken = server.getSecureBulkLoadManager().prepareBulkLoad(region, request); PrepareBulkLoadResponse.Builder builder = PrepareBulkLoadResponse.newBuilder(); builder.setBulkToken(bulkToken); return builder.build(); @@ -2527,7 +2371,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, HRegion region = getRegion(request.getRegion()); - regionServer.getSecureBulkLoadManager().cleanupBulkLoad(region, request); + server.getSecureBulkLoadManager().cleanupBulkLoad(region, request); return CleanupBulkLoadResponse.newBuilder().build(); } catch (IOException ie) { throw new ServiceException(ie); @@ -2558,10 +2402,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler, private FileSystem getFileSystem(List filePaths) throws IOException { if (filePaths.isEmpty()) { // local hdfs - return regionServer.getFileSystem(); + return server.getFileSystem(); } // source hdfs - return new Path(filePaths.get(0)).getFileSystem(regionServer.getConfiguration()); + return new Path(filePaths.get(0)).getFileSystem(server.getConfiguration()); } private Message execServiceOnRegion(HRegion region, @@ -2573,7 +2417,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, private boolean shouldRejectRequestsFromClient(HRegion region) { TableName table = region.getRegionInfo().getTable(); - ReplicationSourceService service = regionServer.getReplicationSourceService(); + ReplicationSourceService service = server.getReplicationSourceService(); return service != null && service.getSyncReplicationPeerInfoProvider() .checkState(table, RejectRequestsFromClientStateChecker.get()); } @@ -2665,7 +2509,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } catch (IOException ie) { throw new ServiceException(ie); } finally { - final MetricsRegionServer metricsRegionServer = regionServer.getMetrics(); + final MetricsRegionServer 
metricsRegionServer = server.getMetrics(); if (metricsRegionServer != null) { TableDescriptor td = region != null? region.getTableDescriptor(): null; if (td != null) { @@ -3079,7 +2923,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, MutateResponse.Builder builder = MutateResponse.newBuilder(); MutationProto mutation = request.getMutation(); if (!region.getRegionInfo().isMetaRegion()) { - regionServer.getMemStoreFlusher().reclaimMemStoreMemory(); + server.getMemStoreFlusher().reclaimMemStoreMemory(); } long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE; quota = getRpcQuotaManager().checkQuota(region, OperationQuota.OperationType.MUTATE); @@ -3130,7 +2974,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } return builder.build(); } catch (IOException ie) { - regionServer.checkFileSystem(); + server.checkFileSystem(); throw new ServiceException(ie); } finally { if (quota != null) { @@ -3148,7 +2992,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, quota.addMutation(put); region.put(put); - MetricsRegionServer metricsRegionServer = regionServer.getMetrics(); + MetricsRegionServer metricsRegionServer = server.getMetrics(); if (metricsRegionServer != null) { long after = EnvironmentEdgeManager.currentTime(); metricsRegionServer.updatePut(region.getRegionInfo().getTable(), after - before); @@ -3164,7 +3008,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, quota.addMutation(delete); region.delete(delete); - MetricsRegionServer metricsRegionServer = regionServer.getMetrics(); + MetricsRegionServer metricsRegionServer = server.getMetrics(); if (metricsRegionServer != null) { long after = EnvironmentEdgeManager.currentTime(); metricsRegionServer.updateDelete(region.getRegionInfo().getTable(), after - before); @@ -3192,7 +3036,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, result = region.getCoprocessorHost().postCheckAndMutate(checkAndMutate, result); } } - MetricsRegionServer metricsRegionServer = regionServer.getMetrics(); + MetricsRegionServer metricsRegionServer = server.getMetrics(); if (metricsRegionServer != null) { long after = EnvironmentEdgeManager.currentTime(); metricsRegionServer.updateCheckAndMutate( @@ -3250,7 +3094,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, rejectIfInStandByState(rsh.r); RegionInfo hri = rsh.s.getRegionInfo(); // Yes, should be the same instance - if (regionServer.getOnlineRegion(hri.getRegionName()) != rsh.r) { + if (server.getOnlineRegion(hri.getRegionName()) != rsh.r) { String msg = "Region has changed on the scanner " + scannerName + ": regionName=" + hri.getRegionNameAsString() + ", scannerRegionName=" + rsh.r; LOG.warn(msg + ", closing..."); @@ -3261,7 +3105,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, LOG.warn("Getting exception closing " + scannerName, e); } finally { try { - regionServer.getLeaseManager().cancelLease(scannerName); + server.getLeaseManager().cancelLease(scannerName); } catch (LeaseException e) { LOG.warn("Getting exception closing " + scannerName, e); } @@ -3345,7 +3189,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, private void addScannerLeaseBack(LeaseManager.Lease lease) { try { - regionServer.getLeaseManager().addLease(lease); + server.getLeaseManager().addLease(lease); } catch (LeaseStillHeldException e) { // should not happen as the scanner id is unique. 
throw new AssertionError(e); @@ -3568,7 +3412,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, long end = EnvironmentEdgeManager.currentTime(); long responseCellSize = context != null ? context.getResponseCellSize() : 0; region.getMetrics().updateScanTime(end - before); - final MetricsRegionServer metricsRegionServer = regionServer.getMetrics(); + final MetricsRegionServer metricsRegionServer = server.getMetrics(); if (metricsRegionServer != null) { metricsRegionServer.updateScanSize( region.getTableDescriptor().getTableName(), responseCellSize); @@ -3611,7 +3455,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, LOG.debug( "Server shutting down and client tried to access missing scanner " + scannerName); } - final LeaseManager leaseManager = regionServer.getLeaseManager(); + final LeaseManager leaseManager = server.getLeaseManager(); if (leaseManager != null) { try { leaseManager.cancelLease(scannerName); @@ -3659,7 +3503,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, try { // Remove lease while its being processed in server; protects against case // where processing of request takes > lease expiration time. or null if none found. - lease = regionServer.getLeaseManager().removeLease(scannerName); + lease = server.getLeaseManager().removeLease(scannerName); } catch (LeaseException e) { throw new ServiceException(e); } @@ -3860,20 +3704,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, public CoprocessorServiceResponse execRegionServerService(RpcController controller, CoprocessorServiceRequest request) throws ServiceException { rpcPreCheck("execRegionServerService"); - return regionServer.execRegionServerService(controller, request); - } - - @Override - public UpdateConfigurationResponse updateConfiguration( - RpcController controller, UpdateConfigurationRequest request) - throws ServiceException { - try { - requirePermission("updateConfiguration", Permission.Action.ADMIN); - this.regionServer.updateConfiguration(); - } catch (Exception e) { - throw new ServiceException(e); - } - return UpdateConfigurationResponse.getDefaultInstance(); + return server.execRegionServerService(controller, request); } @Override @@ -3881,7 +3712,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, RpcController controller, GetSpaceQuotaSnapshotsRequest request) throws ServiceException { try { final RegionServerSpaceQuotaManager manager = - regionServer.getRegionServerSpaceQuotaManager(); + server.getRegionServerSpaceQuotaManager(); final GetSpaceQuotaSnapshotsResponse.Builder builder = GetSpaceQuotaSnapshotsResponse.newBuilder(); if (manager != null) { @@ -3909,12 +3740,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler, List regions = getRegions(request.getRegionList(), stats); for (HRegion region : regions) { try { - stats = stats.append(this.regionServer.clearRegionBlockCache(region)); + stats = stats.append(this.server.clearRegionBlockCache(region)); } catch (Exception e) { stats.addException(region.getRegionInfo().getRegionName(), e); } } - stats.withMaxCacheSize(regionServer.getBlockCache().map(BlockCache::getMaxSize).orElse(0L)); + stats.withMaxCacheSize(server.getBlockCache().map(BlockCache::getMaxSize).orElse(0L)); return builder.setStats(ProtobufUtil.toCacheEvictionStats(stats.build())).build(); } @@ -3926,7 +3757,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, TableDescriptor tableDesc = tdCache.get(regionInfo.getTable()); if (tableDesc == null) { try { - tableDesc = 
regionServer.getTableDescriptors().get(regionInfo.getTable()); + tableDesc = server.getTableDescriptors().get(regionInfo.getTable()); } catch (IOException e) { // Here we do not fail the whole method since we also need deal with other // procedures, and we can not ignore this one, so we still schedule a @@ -3937,13 +3768,13 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } } if (regionOpenInfo.getFavoredNodesCount() > 0) { - regionServer.updateRegionFavoredNodesMapping(regionInfo.getEncodedName(), + server.updateRegionFavoredNodesMapping(regionInfo.getEncodedName(), regionOpenInfo.getFavoredNodesList()); } long procId = regionOpenInfo.getOpenProcId(); - if (regionServer.submitRegionProcedure(procId)) { - regionServer.executorService.submit(AssignRegionHandler - .create(regionServer, regionInfo, procId, tableDesc, + if (server.submitRegionProcedure(procId)) { + server.getExecutorService().submit(AssignRegionHandler + .create(server, regionInfo, procId, tableDesc, masterSystemTime)); } } @@ -3960,9 +3791,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler, ProtobufUtil.toServerName(request.getDestinationServer()) : null; long procId = request.getCloseProcId(); - if (regionServer.submitRegionProcedure(procId)) { - regionServer.executorService.submit(UnassignRegionHandler - .create(regionServer, encodedName, procId, false, destination)); + if (server.submitRegionProcedure(procId)) { + server.getExecutorService().submit(UnassignRegionHandler + .create(server, encodedName, procId, false, destination)); } } @@ -3974,12 +3805,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler, } catch (Exception e) { LOG.warn("Failed to instantiating remote procedure {}, pid={}", request.getProcClass(), request.getProcId(), e); - regionServer.remoteProcedureComplete(request.getProcId(), e); + server.remoteProcedureComplete(request.getProcId(), e); return; } - callable.init(request.getProcData().toByteArray(), regionServer); + callable.init(request.getProcData().toByteArray(), server); LOG.debug("Executing remote procedure {}, pid={}", callable.getClass(), request.getProcId()); - regionServer.executeProcedure(request.getProcId(), callable); + server.executeProcedure(request.getProcId(), callable); } @Override @@ -3989,7 +3820,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler, try { checkOpen(); throwOnWrongStartCode(request); - regionServer.getRegionServerCoprocessorHost().preExecuteProcedures(); + server.getRegionServerCoprocessorHost().preExecuteProcedures(); if (request.getOpenRegionCount() > 0) { // Avoid reading from the TableDescritor every time(usually it will read from the file // system) @@ -4002,137 +3833,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler, if (request.getProcCount() > 0) { request.getProcList().forEach(this::executeProcedures); } - regionServer.getRegionServerCoprocessorHost().postExecuteProcedures(); + server.getRegionServerCoprocessorHost().postExecuteProcedures(); return ExecuteProceduresResponse.getDefaultInstance(); } catch (IOException e) { throw new ServiceException(e); } } - - private List getSlowLogPayloads(SlowLogResponseRequest request, - NamedQueueRecorder namedQueueRecorder) { - if (namedQueueRecorder == null) { - return Collections.emptyList(); - } - List slowLogPayloads; - NamedQueueGetRequest namedQueueGetRequest = new NamedQueueGetRequest(); - namedQueueGetRequest.setNamedQueueEvent(RpcLogDetails.SLOW_LOG_EVENT); - namedQueueGetRequest.setSlowLogResponseRequest(request); - NamedQueueGetResponse 
namedQueueGetResponse = - namedQueueRecorder.getNamedQueueRecords(namedQueueGetRequest); - slowLogPayloads = namedQueueGetResponse != null ? - namedQueueGetResponse.getSlowLogPayloads() : - Collections.emptyList(); - return slowLogPayloads; - } - - @Override - @QosPriority(priority = HConstants.ADMIN_QOS) - public ClearSlowLogResponses clearSlowLogsResponses(final RpcController controller, - final ClearSlowLogResponseRequest request) throws ServiceException { - rpcPreCheck("clearSlowLogsResponses"); - final NamedQueueRecorder namedQueueRecorder = - this.regionServer.getNamedQueueRecorder(); - boolean slowLogsCleaned = Optional.ofNullable(namedQueueRecorder) - .map(queueRecorder -> - queueRecorder.clearNamedQueue(NamedQueuePayload.NamedQueueEvent.SLOW_LOG)) - .orElse(false); - ClearSlowLogResponses clearSlowLogResponses = ClearSlowLogResponses.newBuilder() - .setIsCleaned(slowLogsCleaned) - .build(); - return clearSlowLogResponses; - } - - @Override - @QosPriority(priority = HConstants.ADMIN_QOS) - public HBaseProtos.LogEntry getLogEntries(RpcController controller, - HBaseProtos.LogRequest request) throws ServiceException { - try { - final String logClassName = request.getLogClassName(); - Class logClass = Class.forName(logClassName) - .asSubclass(Message.class); - Method method = logClass.getMethod("parseFrom", ByteString.class); - if (logClassName.contains("SlowLogResponseRequest")) { - SlowLogResponseRequest slowLogResponseRequest = - (SlowLogResponseRequest) method.invoke(null, request.getLogMessage()); - final NamedQueueRecorder namedQueueRecorder = - this.regionServer.getNamedQueueRecorder(); - final List slowLogPayloads = - getSlowLogPayloads(slowLogResponseRequest, namedQueueRecorder); - SlowLogResponses slowLogResponses = SlowLogResponses.newBuilder() - .addAllSlowLogPayloads(slowLogPayloads) - .build(); - return HBaseProtos.LogEntry.newBuilder() - .setLogClassName(slowLogResponses.getClass().getName()) - .setLogMessage(slowLogResponses.toByteString()).build(); - } - } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException - | InvocationTargetException e) { - LOG.error("Error while retrieving log entries.", e); - throw new ServiceException(e); - } - throw new ServiceException("Invalid request params"); - } - - public RpcScheduler getRpcScheduler() { - return rpcServer.getScheduler(); - } - - protected AccessChecker getAccessChecker() { - return accessChecker; - } - - protected ZKPermissionWatcher getZkPermissionWatcher() { - return zkPermissionWatcher; - } - - @Override - public GetClusterIdResponse getClusterId(RpcController controller, GetClusterIdRequest request) - throws ServiceException { - return GetClusterIdResponse.newBuilder().setClusterId(regionServer.getClusterId()).build(); - } - - @Override - public GetActiveMasterResponse getActiveMaster(RpcController controller, - GetActiveMasterRequest request) throws ServiceException { - GetActiveMasterResponse.Builder builder = GetActiveMasterResponse.newBuilder(); - regionServer.getActiveMaster() - .ifPresent(name -> builder.setServerName(ProtobufUtil.toServerName(name))); - return builder.build(); - } - - @Override - public GetMastersResponse getMasters(RpcController controller, GetMastersRequest request) - throws ServiceException { - GetMastersResponse.Builder builder = GetMastersResponse.newBuilder(); - regionServer.getActiveMaster() - .ifPresent(activeMaster -> builder.addMasterServers(GetMastersResponseEntry.newBuilder() - .setServerName(ProtobufUtil.toServerName(activeMaster)).setIsActive(true))); - 
regionServer.getBackupMasters() - .forEach(backupMaster -> builder.addMasterServers(GetMastersResponseEntry.newBuilder() - .setServerName(ProtobufUtil.toServerName(backupMaster)).setIsActive(false))); - return builder.build(); - } - - @Override - public GetMetaRegionLocationsResponse getMetaRegionLocations(RpcController controller, - GetMetaRegionLocationsRequest request) throws ServiceException { - GetMetaRegionLocationsResponse.Builder builder = GetMetaRegionLocationsResponse.newBuilder(); - regionServer.getMetaLocations() - .forEach(location -> builder.addMetaLocations(ProtobufUtil.toRegionLocation(location))); - return builder.build(); - } - - @Override - public final GetBootstrapNodesResponse getBootstrapNodes(RpcController controller, - GetBootstrapNodesRequest request) throws ServiceException { - List bootstrapNodes = new ArrayList<>(regionServer.getRegionServers()); - Collections.shuffle(bootstrapNodes, ThreadLocalRandom.current()); - int maxNodeCount = regionServer.getConfiguration().getInt(CLIENT_BOOTSTRAP_NODE_LIMIT, - DEFAULT_CLIENT_BOOTSTRAP_NODE_LIMIT); - GetBootstrapNodesResponse.Builder builder = GetBootstrapNodesResponse.newBuilder(); - bootstrapNodes.stream().limit(maxNodeCount).map(ProtobufUtil::toServerName) - .forEach(builder::addServerName); - return builder.build(); - } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java index 11222a5dd24..fb13abb8da0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.replication.regionserver; import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.findArchivedLog; + import java.io.FileNotFoundException; import java.io.IOException; import java.lang.reflect.InvocationTargetException; @@ -46,7 +47,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost; import org.apache.hadoop.hbase.replication.ChainWALEntryFilter; import org.apache.hadoop.hbase.replication.ClusterMarkingEntryFilter; @@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.replication.ReplicationQueueStorage; import org.apache.hadoop.hbase.replication.SystemTableWALEntryFilter; import org.apache.hadoop.hbase.replication.WALEntryFilter; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.OOMEChecker; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; @@ -65,6 +66,7 @@ import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** @@ -428,7 +430,7 @@ public class ReplicationSource implements ReplicationSourceInterface { protected final void uncaughtException(Thread t, Throwable e, ReplicationSourceManager manager, String peerId) { - RSRpcServices.exitIfOOME(e); + OOMEChecker.exitIfOOME(e, getClass().getSimpleName()); LOG.error("Unexpected 
exception in {} currentPath={}", t.getName(), getCurrentPath(), e); if(abortOnError){ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java index 7282a1fe87f..6a2308c61ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java @@ -28,7 +28,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.AuthUtil; import org.apache.hadoop.hbase.Cell; @@ -51,6 +50,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java index 3e247f3061e..346f3df5183 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/NettyEventLoopGroupConfig.java @@ -29,6 +29,8 @@ import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel; import org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultThreadFactory; import java.util.concurrent.ThreadFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ipc.NettyRpcClientConfigHelper; +import org.apache.hadoop.hbase.wal.NettyAsyncFSWALConfigHelper; import org.apache.yetus.audience.InterfaceAudience; /** @@ -76,4 +78,12 @@ public class NettyEventLoopGroupConfig { public Class clientChannelClass() { return clientChannelClass; } + + public static NettyEventLoopGroupConfig setup(Configuration conf, String threadPoolName) { + // Initialize netty event loop group at start as we may use it for rpc server, rpc client & WAL. + NettyEventLoopGroupConfig nelgc = new NettyEventLoopGroupConfig(conf, threadPoolName); + NettyRpcClientConfigHelper.setEventLoopConfig(conf, nelgc.group(), nelgc.clientChannelClass()); + NettyAsyncFSWALConfigHelper.setEventLoopConfig(conf, nelgc.group(), nelgc.clientChannelClass()); + return nelgc; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/OOMEChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/OOMEChecker.java new file mode 100644 index 00000000000..9fdf7ea74b3 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/OOMEChecker.java @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import org.apache.hadoop.hbase.log.HBaseMarkers; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@InterfaceAudience.Private +public final class OOMEChecker { + + private static final Logger LOG = LoggerFactory.getLogger(OOMEChecker.class); + + private OOMEChecker() { + } + + public static boolean exitIfOOME(Throwable e, String service) { + boolean stop = false; + try { + if (e instanceof OutOfMemoryError || + (e.getCause() != null && e.getCause() instanceof OutOfMemoryError) || + (e.getMessage() != null && e.getMessage().contains("java.lang.OutOfMemoryError"))) { + stop = true; + LOG.error(HBaseMarkers.FATAL, "Run out of memory; {} will abort itself immediately", + service, e); + } + } finally { + if (stop) { + Runtime.getRuntime().halt(1); + } + } + return stop; + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java index 37a255ba869..589b1a6452a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java @@ -93,7 +93,6 @@ import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.ChecksumUtil; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.ipc.RpcServerInterface; -import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.logging.Log4jUtils; import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim; import org.apache.hadoop.hbase.master.HMaster; @@ -2941,17 +2940,6 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil { // That's fine. } } - for (MasterThread mt : cluster.getLiveMasterThreads()) { - try { - for (RegionInfo region : ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) { - online.add(region.getRegionNameAsString()); - } - } catch (RegionServerStoppedException e) { - // That's fine. - } catch (ServerNotRunningYetException e) { - // That's fine. - } - } return online; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java index a405f7b24a9..71091753cfe 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/SingleProcessHBaseCluster.java @@ -806,15 +806,6 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface { @Override public ServerName getServerHoldingRegion(final TableName tn, byte[] regionName) throws IOException { - // Assume there is only one master thread which is the active master. - // If there are multiple master threads, the backup master threads - // should hold some regions. Please refer to #countServedRegions - // to see how we find out all regions. 
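/*
 * Editorial aside, not part of the patch: the OOMEChecker utility introduced above replaces the
 * RSRpcServices.exitIfOOME helper that ReplicationSource used to call directly. Below is a
 * minimal sketch of how a server-side worker might route uncaught errors through it; the worker
 * class, its logger, and doWork() are assumed for illustration only.
 */
package org.apache.hadoop.hbase.example;

import org.apache.hadoop.hbase.util.OOMEChecker;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class OomeAwareWorker implements Runnable {

  private static final Logger LOG = LoggerFactory.getLogger(OomeAwareWorker.class);

  @Override
  public void run() {
    try {
      doWork();
    } catch (Throwable t) {
      // exitIfOOME halts the JVM with status 1 (Runtime.halt skips shutdown hooks) if t is,
      // wraps, or mentions an OutOfMemoryError; otherwise it returns false and the caller
      // handles the failure normally.
      if (!OOMEChecker.exitIfOOME(t, getClass().getSimpleName())) {
        LOG.error("Unexpected exception in {}", Thread.currentThread().getName(), t);
      }
    }
  }

  private void doWork() {
    // Placeholder for the worker's real logic.
  }
}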
- HMaster master = getMaster(); - Region region = master.getOnlineRegion(regionName); - if (region != null) { - return master.getServerName(); - } int index = getServerWith(regionName); if (index < 0) { return null; @@ -833,9 +824,6 @@ public class SingleProcessHBaseCluster extends HBaseClusterInterface { for (JVMClusterUtil.RegionServerThread rst : getLiveRegionServerThreads()) { count += rst.getRegionServer().getNumberOfOnlineRegions(); } - for (JVMClusterUtil.MasterThread mt : getLiveMasterThreads()) { - count += mt.getMaster().getNumberOfOnlineRegions(); - } return count; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java index cd04157ae6e..61a4fdfd5d4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaUpdatesGoToPriorityQueue.java @@ -113,13 +113,8 @@ public class TestMetaUpdatesGoToPriorityQueue { // find the meta server SingleProcessHBaseCluster cluster = UTIL.getMiniHBaseCluster(); int rsIndex = cluster.getServerWithMeta(); - HRegionServer rs; - if (rsIndex >= 0) { - rs = cluster.getRegionServer(rsIndex); - } else { - // it is in master - rs = cluster.getMaster(); - } + assertTrue(rsIndex >= 0); + HRegionServer rs = cluster.getRegionServer(rsIndex); SpyingRpcScheduler scheduler = (SpyingRpcScheduler) rs.getRpcServer().getScheduler(); long prevCalls = scheduler.numPriorityCalls; long time = EnvironmentEdgeManager.currentTime(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java index 5274813fb92..4c23ef73c04 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSeparateClientZKCluster.java @@ -133,7 +133,7 @@ public class TestSeparateClientZKCluster { HMaster master = cluster.getMaster(); master.stopMaster(); LOG.info("Stopped master {}", master.getServerName()); - while (!master.isShutDown()) { + while (master.isAlive()) { Thread.sleep(200); } LOG.info("Shutdown master {}", master.getServerName()); @@ -225,7 +225,7 @@ public class TestSeparateClientZKCluster { int metaServerId = cluster.getServerWithMeta(); HRegionServer metaServer = cluster.getRegionServer(metaServerId); metaServer.stop("Stop current RS holding meta region"); - while (!metaServer.isShutDown()) { + while (metaServer.isAlive()) { Thread.sleep(200); } // wait for meta region online diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java index 3ad1676278f..9e31cc33862 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestTableFavoredNodes.java @@ -290,7 +290,6 @@ public class TestTableFavoredNodes { * 3. Is the FN information consistent between Master and the respective RegionServer? */ private void checkIfFavoredNodeInformationIsCorrect(TableName tableName) throws Exception { - /* * Since we need HRegionServer to check for consistency of FN between Master and RS, * lets construct a map for each serverName lookup. Makes it easy later. 
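(Editorial aside, not part of the patch.) For the static NettyEventLoopGroupConfig.setup(...) helper added earlier in this change, the following is a minimal startup sketch, assuming it is invoked once per server process; the wrapper class and thread pool name are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.NettyEventLoopGroupConfig;

public final class EventLoopBootstrap {

  public static NettyEventLoopGroupConfig initialize() {
    Configuration conf = HBaseConfiguration.create();
    // setup() builds the shared Netty event loop group and registers it with both
    // NettyRpcClientConfigHelper and NettyAsyncFSWALConfigHelper, so the RPC client,
    // RPC server and async WAL can all reuse a single group.
    return NettyEventLoopGroupConfig.setup(conf, "Example-EventLoopGroup");
  }
}

Centralizing this in one helper avoids each component constructing its own event loop group, which is the apparent motivation for the new method.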
@@ -300,11 +299,6 @@ public class TestTableFavoredNodes { TEST_UTIL.getMiniHBaseCluster().getLiveRegionServerThreads()) { snRSMap.put(rst.getRegionServer().getServerName(), rst.getRegionServer()); } - // Also include master, since it can also host user regions. - for (JVMClusterUtil.MasterThread rst : - TEST_UTIL.getMiniHBaseCluster().getLiveMasterThreads()) { - snRSMap.put(rst.getMaster().getServerName(), rst.getMaster()); - } int dnPort = FavoredNodeAssignmentHelper.getDataNodePort(TEST_UTIL.getConfiguration()); RegionLocator regionLocator = admin.getConnection().getRegionLocator(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestMasterFifoRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestMasterFifoRpcScheduler.java index 9822294a993..af9cf66523a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestMasterFifoRpcScheduler.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestMasterFifoRpcScheduler.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterRpcServices; -import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.RPCTests; import org.junit.AfterClass; @@ -43,8 +42,6 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; @@ -55,8 +52,6 @@ public class TestMasterFifoRpcScheduler { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMasterFifoRpcScheduler.class); - private static final Logger LOG = LoggerFactory.getLogger(TestMasterFifoRpcScheduler.class); - private static final String REGION_SERVER_REPORT = "RegionServerReport"; private static final String OTHER = "Other"; private static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil(); @@ -64,7 +59,7 @@ public class TestMasterFifoRpcScheduler { @BeforeClass public static void setupBeforeClass() throws Exception { Configuration conf = TEST_UTIL.getConfiguration(); - conf.set(RSRpcServices.MASTER_RPC_SCHEDULER_FACTORY_CLASS, + conf.set(MasterRpcServices.MASTER_RPC_SCHEDULER_FACTORY_CLASS, "org.apache.hadoop.hbase.regionserver.MasterFifoRpcSchedulerFactory"); conf.setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 5); conf.setInt(MasterFifoRpcScheduler.MASTER_SERVER_REPORT_HANDLER_COUNT, 2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java index 230b5cd7b28..a0f26c001e2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java @@ -22,6 +22,7 @@ import static org.apache.hadoop.hbase.SplitLogCounters.tot_mgr_wait_for_zk_delet import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -53,7 +54,6 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionLocator; import 
org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination; -import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.master.assignment.RegionStates; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl; @@ -78,6 +78,7 @@ import org.junit.Test; import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** @@ -384,27 +385,6 @@ public abstract class AbstractTestDLS { putData(region, hri.getStartKey(), nrows, Bytes.toBytes("q"), COLUMN_FAMILY); } } - - for (MasterThread mt : cluster.getLiveMasterThreads()) { - HRegionServer hrs = mt.getMaster(); - List hris; - try { - hris = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices()); - } catch (ServerNotRunningYetException e) { - // It's ok: this master may be a backup. Ignored. - continue; - } - for (RegionInfo hri : hris) { - if (hri.getTable().isSystemTable()) { - continue; - } - LOG.debug( - "adding data to rs = " + mt.getName() + " region = " + hri.getRegionNameAsString()); - Region region = hrs.getOnlineRegion(hri.getRegionName()); - assertTrue(region != null); - putData(region, hri.getStartKey(), nrows, Bytes.toBytes("q"), COLUMN_FAMILY); - } - } } public void makeWAL(HRegionServer hrs, List regions, int num_edits, int edit_size) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java index b3af4145158..d88f6dd0dc1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetLastFlushedSequenceId.java @@ -93,18 +93,16 @@ public class TestGetLastFlushedSequenceId { } assertNotNull(region); Thread.sleep(2000); - RegionStoreSequenceIds ids = - testUtil.getHBaseCluster().getMaster() - .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes()); + RegionStoreSequenceIds ids = testUtil.getHBaseCluster().getMaster().getServerManager() + .getLastFlushedSequenceId(region.getRegionInfo().getEncodedNameAsBytes()); assertEquals(HConstants.NO_SEQNUM, ids.getLastFlushedSequenceId()); // This will be the sequenceid just before that of the earliest edit in memstore. 
long storeSequenceId = ids.getStoreSequenceId(0).getSequenceId(); assertTrue(storeSequenceId > 0); testUtil.getAdmin().flush(tableName); Thread.sleep(2000); - ids = - testUtil.getHBaseCluster().getMaster() - .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes()); + ids = testUtil.getHBaseCluster().getMaster().getServerManager() + .getLastFlushedSequenceId(region.getRegionInfo().getEncodedNameAsBytes()); assertTrue(ids.getLastFlushedSequenceId() + " > " + storeSequenceId, ids.getLastFlushedSequenceId() > storeSequenceId); assertEquals(ids.getLastFlushedSequenceId(), ids.getStoreSequenceId(0).getSequenceId()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetReplicationLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetReplicationLoad.java index a1a1d7de626..d7d5b38ec6d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetReplicationLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestGetReplicationLoad.java @@ -56,11 +56,6 @@ public class TestGetReplicationLoad { public MyMaster(Configuration conf) throws IOException, KeeperException, InterruptedException { super(conf); } - - @Override - protected void tryRegionServerReport(long reportStartTime, long reportEndTime) { - // do nothing - } } @BeforeClass diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java index 705943bc72c..068dead7255 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SingleProcessHBaseCluster; import org.apache.hadoop.hbase.StartTestingClusterOption; import org.apache.hadoop.hbase.YouAreDeadException; -import org.apache.hadoop.hbase.regionserver.RSRpcServices; import org.apache.hadoop.hbase.test.MetricsAssertHelper; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -74,12 +73,7 @@ public class TestMasterMetrics { } @Override - protected void tryRegionServerReport(long reportStartTime, long reportEndTime) { - // do nothing - } - - @Override - protected RSRpcServices createRpcServices() throws IOException { + protected MasterRpcServices createRpcServices() throws IOException { return new MasterRpcServices(this) { @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNotCarryTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNotCarryTable.java deleted file mode 100644 index c39a58e898f..00000000000 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNotCarryTable.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.master; - -import static org.junit.Assert.assertFalse; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HBaseClassTestRule; -import org.apache.hadoop.hbase.HBaseTestingUtil; -import org.apache.hadoop.hbase.testclassification.MasterTests; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.util.CommonFSUtils; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.ClassRule; -import org.junit.Test; -import org.junit.experimental.categories.Category; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@Category({MasterTests.class, MediumTests.class}) -public class TestMasterNotCarryTable { - - @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMasterNotCarryTable.class); - - private static final Logger LOG = LoggerFactory.getLogger(TestMasterNotCarryTable.class); - - private static final HBaseTestingUtil UTIL = new HBaseTestingUtil(); - - private static HMaster master; - - @BeforeClass - public static void setUp() throws Exception { - Configuration c = UTIL.getConfiguration(); - // We use local filesystem. Set it so it writes into the testdir. - CommonFSUtils.setRootDir(c, UTIL.getDataTestDir()); - UTIL.startMiniZKCluster(); - master = new HMaster(UTIL.getConfiguration()); - master.start(); - // As no regionservers, only wait master to create AssignmentManager. - while (master.getAssignmentManager() != null) { - LOG.debug("Wait master to create AssignmentManager"); - Thread.sleep(1000); - } - } - - @AfterClass - public static void tearDown() throws Exception { - master.stop("Shutdown"); - UTIL.shutdownMiniZKCluster(); - } - - @Test - public void testMasterBlockCache() { - // no need to instantiate block cache. - assertFalse(master.getBlockCache().isPresent()); - } - - @Test - public void testMasterMOBFileCache() { - // no need to instantiate mob file cache. 
- assertFalse(master.getMobFileCache().isPresent()); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterQosFunction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterQosFunction.java index 52a611a8060..63e5018f644 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterQosFunction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterQosFunction.java @@ -103,8 +103,5 @@ public class TestMasterQosFunction extends QosTestBase { @Test public void testAnnotations() { checkMethod(conf, "GetLastFlushedSequenceId", HConstants.ADMIN_QOS, qosFunction); - checkMethod(conf, "CompactRegion", HConstants.ADMIN_QOS, qosFunction); - checkMethod(conf, "GetLastFlushedSequenceId", HConstants.ADMIN_QOS, qosFunction); - checkMethod(conf, "GetRegionInfo", HConstants.ADMIN_QOS, qosFunction); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java index e4984de470a..878bb4f0b79 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java @@ -261,9 +261,6 @@ public class TestRollingRestart { for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) { numFound += rst.getRegionServer().getNumberOfOnlineRegions(); } - for (MasterThread mt : cluster.getMasterThreads()) { - numFound += mt.getMaster().getNumberOfOnlineRegions(); - } return numFound; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java index ee01223a944..1bfd8ae4654 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestWakeUpUnexpectedProcedure.java @@ -107,7 +107,7 @@ public class TestWakeUpUnexpectedProcedure { ExecuteProceduresRequest request) throws ServiceException { if (request.getOpenRegionCount() > 0) { if (ARRIVE_EXEC_PROC != null) { - SERVER_TO_KILL = regionServer.getServerName(); + SERVER_TO_KILL = getServer().getServerName(); ARRIVE_EXEC_PROC.countDown(); ARRIVE_EXEC_PROC = null; try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMasterStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMasterStatusServlet.java index 7be0d37e236..d46ece42849 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMasterStatusServlet.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/http/TestMasterStatusServlet.java @@ -41,13 +41,10 @@ import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.master.assignment.AssignmentManager; import org.apache.hadoop.hbase.master.assignment.RegionStates; -import org.apache.hadoop.hbase.regionserver.MetricsRegionServer; -import org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapperStub; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl; import org.apache.hadoop.hbase.util.Bytes; -import 
org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; import org.junit.Before; @@ -112,18 +109,9 @@ public class TestMasterStatusServlet { Mockito.doReturn("fakequorum").when(zkw).getQuorum(); Mockito.doReturn(zkw).when(master).getZooKeeper(); - // Fake MasterAddressTracker - MasterAddressTracker tracker = Mockito.mock(MasterAddressTracker.class); - Mockito.doReturn(tracker).when(master).getMasterAddressTracker(); - Mockito.doReturn(FAKE_HOST).when(tracker).getMasterAddress(); - // Fake ActiveMaster Mockito.doReturn(Optional.of(FAKE_HOST)).when(master).getActiveMaster(); - MetricsRegionServer rms = Mockito.mock(MetricsRegionServer.class); - Mockito.doReturn(new MetricsRegionServerWrapperStub()).when(rms).getRegionServerWrapper(); - Mockito.doReturn(rms).when(master).getMetrics(); - // Mock admin admin = Mockito.mock(Admin.class); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestSlowLogAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestSlowLogAccessor.java index 908f2e278a0..c94d8e45154 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestSlowLogAccessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/namequeues/TestSlowLogAccessor.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.namequeues; import java.io.IOException; -import java.lang.reflect.Field; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; @@ -93,9 +92,7 @@ public class TestSlowLogAccessor { @Before public void setUp() throws Exception { HRegionServer hRegionServer = HBASE_TESTING_UTILITY.getMiniHBaseCluster().getRegionServer(0); - Field slowLogRecorder = HRegionServer.class.getDeclaredField("namedQueueRecorder"); - slowLogRecorder.setAccessible(true); - this.namedQueueRecorder = (NamedQueueRecorder) slowLogRecorder.get(hRegionServer); + this.namedQueueRecorder = hRegionServer.getNamedQueueRecorder(); } private List getSlowLogPayloads( diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiLogThreshold.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiLogThreshold.java index 7cb2aa47e02..defea10ebd9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiLogThreshold.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMultiLogThreshold.java @@ -178,7 +178,6 @@ public class TestMultiLogThreshold { } private void assertLogBatchWarnings(boolean expected) { - assertFalse(logs.isEmpty()); boolean actual = false; for (LevelAndMessage event : logs) { if (event.level == org.apache.logging.log4j.Level.WARN && diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMutateRowsRecovery.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMutateRowsRecovery.java index 3dfb43eedd1..5a2ce0b46b5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMutateRowsRecovery.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMutateRowsRecovery.java @@ -125,7 +125,7 @@ public class TestMutateRowsRecovery { // Send the RS Load to ensure correct lastflushedseqid for stores rs1.tryRegionServerReport(now - 30000, now); // Kill the RS to trigger wal replay - cluster.killRegionServer(rs1.serverName); + cluster.killRegionServer(rs1.getServerName()); // Ensure correct data 
exists Get g1 = new Get(row1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java index 737ba46b707..60f65be66c4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java @@ -529,7 +529,7 @@ public class TestRegionMergeTransactionOnCluster { } @Override - protected RSRpcServices createRpcServices() throws IOException { + protected MasterRpcServices createRpcServices() throws IOException { return new MyMasterRpcServices(this); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java index 87837866dc5..0dd16de9641 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAbort.java @@ -55,7 +55,6 @@ import org.apache.hadoop.hbase.coprocessor.RegionObserver; import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessor; import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionServerObserver; -import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; @@ -121,12 +120,6 @@ public class TestRegionServerAbort { StopBlockingRegionObserver cp = (StopBlockingRegionObserver)cpHost.findCoprocessor(className); cp.setStopAllowed(true); } - HMaster master = cluster.getMaster(); - RegionServerCoprocessorHost host = master.getRegionServerCoprocessorHost(); - if (host != null) { - StopBlockingRegionObserver obs = (StopBlockingRegionObserver) host.findCoprocessor(className); - if (obs != null) obs.setStopAllowed(true); - } testUtil.shutdownMiniCluster(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java index 4260b1d57c2..3f3b2bcf1c9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java @@ -161,7 +161,7 @@ public class TestRegionServerNoMaster { throws Exception { AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(rs.getServerName(), hri, null); - AdminProtos.OpenRegionResponse responseOpen = rs.rpcServices.openRegion(null, orr); + AdminProtos.OpenRegionResponse responseOpen = rs.getRpcServices().openRegion(null, orr); Assert.assertTrue(responseOpen.getOpeningStateCount() == 1); Assert.assertTrue(responseOpen.getOpeningState(0). 
@@ -184,7 +184,7 @@ public class TestRegionServerNoMaster { throws Exception { AdminProtos.CloseRegionRequest crr = ProtobufUtil.buildCloseRegionRequest( rs.getServerName(), hri.getRegionName()); - AdminProtos.CloseRegionResponse responseClose = rs.rpcServices.closeRegion(null, crr); + AdminProtos.CloseRegionResponse responseClose = rs.getRpcServices().closeRegion(null, crr); Assert.assertTrue(responseClose.getClosed()); checkRegionIsClosed(HTU, rs, hri); } @@ -209,7 +209,7 @@ public class TestRegionServerNoMaster { // no transition in ZK AdminProtos.CloseRegionRequest crr = ProtobufUtil.buildCloseRegionRequest(getRS().getServerName(), regionName); - AdminProtos.CloseRegionResponse responseClose = getRS().rpcServices.closeRegion(null, crr); + AdminProtos.CloseRegionResponse responseClose = getRS().getRpcServices().closeRegion(null, crr); Assert.assertTrue(responseClose.getClosed()); // now waiting & checking. After a while, the transition should be done and the region closed @@ -227,11 +227,12 @@ public class TestRegionServerNoMaster { public void testMultipleCloseFromMaster() throws Exception { for (int i = 0; i < 10; i++) { AdminProtos.CloseRegionRequest crr = - ProtobufUtil.buildCloseRegionRequest(getRS().getServerName(), regionName, null); + ProtobufUtil.buildCloseRegionRequest(getRS().getServerName(), regionName, null); try { - AdminProtos.CloseRegionResponse responseClose = getRS().rpcServices.closeRegion(null, crr); + AdminProtos.CloseRegionResponse responseClose = + getRS().getRpcServices().closeRegion(null, crr); Assert.assertTrue("request " + i + " failed", - responseClose.getClosed() || responseClose.hasClosed()); + responseClose.getClosed() || responseClose.hasClosed()); } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException se) { Assert.assertTrue("The next queries may throw an exception.", i > 0); } @@ -258,7 +259,7 @@ public class TestRegionServerNoMaster { AdminProtos.CloseRegionRequest crr = ProtobufUtil.buildCloseRegionRequest(getRS().getServerName(), regionName); try { - getRS().rpcServices.closeRegion(null, crr); + getRS().getRpcServices().closeRegion(null, crr); Assert.assertTrue(false); } catch (org.apache.hbase.thirdparty.com.google.protobuf.ServiceException expected) { } @@ -268,9 +269,9 @@ public class TestRegionServerNoMaster { hri.getEncodedNameAsBytes())); // Let's start the open handler - TableDescriptor htd = getRS().tableDescriptors.get(hri.getTable()); + TableDescriptor htd = getRS().getTableDescriptors().get(hri.getTable()); - getRS().executorService.submit(new OpenRegionHandler(getRS(), getRS(), hri, htd, -1)); + getRS().getExecutorService().submit(new OpenRegionHandler(getRS(), getRS(), hri, htd, -1)); // The open handler should have removed the region from RIT but kept the region closed checkRegionIsClosed(HTU, getRS(), hri); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRPCScanMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRPCScanMetrics.java index 6ec22fb100e..2d47ff63043 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRPCScanMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerRPCScanMetrics.java @@ -111,7 +111,7 @@ public class TestScannerRPCScanMetrics { scanNextIterate(ht, dummyScan); RSRpcServices testClusterRSRPCServices = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0) - .rpcServices; + .getRpcServices(); assertEquals(4, 
testClusterRSRPCServices.rpcFullScanRequestCount.intValue()); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java index 73cbfde00cb..d163a6d9738 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java @@ -1032,7 +1032,7 @@ public class TestSplitTransactionOnCluster { } @Override - protected RSRpcServices createRpcServices() throws IOException { + protected MasterRpcServices createRpcServices() throws IOException { return new MyMasterRpcServices(this); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatus.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatus.java index 6ba041f8109..56264104ff4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatus.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStatus.java @@ -109,7 +109,7 @@ public class TestReplicationStatus extends TestReplicationBase { // Stop rs1, then the queue of rs1 will be transfered to rs0 HRegionServer hrs = UTIL1.getHBaseCluster().getRegionServer(1); hrs.stop("Stop RegionServer"); - while(!hrs.isShutDown()) { + while(hrs.isAlive()) { Threads.sleep(100); } // To be sure it dead and references cleaned up. TODO: Change this to a barrier. diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 24ef9389cc8..1821f708f7f 100644 --- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -75,11 +75,11 @@ import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Scan.ReadType; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.client.TableState; -import org.apache.hadoop.hbase.client.Scan.ReadType; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; @@ -88,7 +88,6 @@ import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.ChecksumUtil; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.ipc.RpcServerInterface; -import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.logging.Log4jUtils; import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim; import org.apache.hadoop.hbase.master.HMaster; @@ -3415,18 +3414,6 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility { // That's fine. } } - for (MasterThread mt : cluster.getLiveMasterThreads()) { - try { - for (RegionInfo region : - ProtobufUtil.getOnlineRegions(mt.getMaster().getRSRpcServices())) { - online.add(region.getRegionNameAsString()); - } - } catch (RegionServerStoppedException e) { - // That's fine. 
- } catch (ServerNotRunningYetException e) { - // That's fine. - } - } return online; } diff --git a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java index f8dce256386..17d64b5ca65 100644 --- a/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java +++ b/hbase-testing-util/src/main/java/org/apache/hadoop/hbase/MiniHBaseCluster.java @@ -858,16 +858,7 @@ public class MiniHBaseCluster extends HBaseCluster { @Override public ServerName getServerHoldingRegion(final TableName tn, byte[] regionName) - throws IOException { - // Assume there is only one master thread which is the active master. - // If there are multiple master threads, the backup master threads - // should hold some regions. Please refer to #countServedRegions - // to see how we find out all regions. - HMaster master = getMaster(); - Region region = master.getOnlineRegion(regionName); - if (region != null) { - return master.getServerName(); - } + throws IOException { int index = getServerWith(regionName); if (index < 0) { return null; @@ -886,9 +877,6 @@ public class MiniHBaseCluster extends HBaseCluster { for (JVMClusterUtil.RegionServerThread rst : getLiveRegionServerThreads()) { count += rst.getRegionServer().getNumberOfOnlineRegions(); } - for (JVMClusterUtil.MasterThread mt : getLiveMasterThreads()) { - count += mt.getMaster().getNumberOfOnlineRegions(); - } return count; }
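(Editorial aside, not part of the patch.) With the master no longer able to host regions, the mini-cluster helpers above consult only region server threads. A minimal sketch mirroring the kept code, with the wrapper class and method names assumed for illustration:

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SingleProcessHBaseCluster;
import org.apache.hadoop.hbase.util.JVMClusterUtil;

public final class RegionLookupExample {

  /** Returns the region server holding the given region, or null if no region server does. */
  static ServerName findHoldingServer(SingleProcessHBaseCluster cluster, byte[] regionName) {
    int index = cluster.getServerWith(regionName);
    return index < 0 ? null : cluster.getRegionServer(index).getServerName();
  }

  /** Counts served regions across region server threads only; masters no longer carry any. */
  static long countServedRegions(SingleProcessHBaseCluster cluster) {
    long count = 0;
    for (JVMClusterUtil.RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
      count += rst.getRegionServer().getNumberOfOnlineRegions();
    }
    return count;
  }
}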