HBASE-22676 Move all the code in hbase-rsgroup to hbase-server and remove hbase-rsgroup module (#399)

Signed-off-by: Zheng Hu <openinx@gmail.com>
Author: Duo Zhang
Date: 2019-07-23 09:40:02 +08:00
parent 3032c713e1
commit f7bbdde975
32 changed files with 647 additions and 1064 deletions

View File

@@ -144,14 +144,6 @@
</includes>
<fileMode>0644</fileMode>
</fileSet>
<fileSet>
<directory>${project.basedir}/../hbase-rsgroup/target/</directory>
<outputDirectory>lib</outputDirectory>
<includes>
<include>${rsgroup.test.jar}</include>
</includes>
<fileMode>0644</fileMode>
</fileSet>
<fileSet>
<directory>${project.basedir}/../hbase-mapreduce/target/</directory>
<outputDirectory>lib</outputDirectory>

View File

@@ -52,7 +52,6 @@
<include>org.apache.hbase:hbase-protocol-shaded</include>
<include>org.apache.hbase:hbase-replication</include>
<include>org.apache.hbase:hbase-rest</include>
<include>org.apache.hbase:hbase-rsgroup</include>
<include>org.apache.hbase:hbase-server</include>
<include>org.apache.hbase:hbase-shell</include>
<include>org.apache.hbase:hbase-testing-util</include>

View File

@@ -181,16 +181,6 @@
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-rsgroup</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-rsgroup</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>

View File

@@ -1,13 +0,0 @@
ON PROTOBUFS
This maven module has protobuf definition files ('.protos') used by hbase
Coprocessor Endpoints that ship with hbase core including tests. Coprocessor
Endpoints are meant to be standalone, independent code not reliant on hbase
internals. They define their Service using protobuf. The protobuf version
they use can be distinct from that used by HBase internally since HBase started
shading its protobuf references. Endpoints have no access to the shaded protobuf
hbase uses. They do have access to the content of hbase-protocol -- the
.protos found in here -- but avoid using as much of this as you can as it is
liable to change.
Generation of java files from protobuf .proto files included here is done as
part of the build.
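(Illustrative sketch, not part of this commit: a standalone Endpoint module following the note above would typically declare its own unshaded protobuf dependency next to hbase-protocol, roughly like the hypothetical POM fragment below; the coordinates are standard Maven ones and the protobuf version is only a placeholder to be matched to the generated stubs.)
<!-- Hypothetical dependencies for a standalone coprocessor Endpoint: it compiles
     against plain com.google.protobuf rather than HBase's shaded copy, and may
     (sparingly) reference the .protos shipped in hbase-protocol. -->
<dependencies>
  <dependency>
    <groupId>com.google.protobuf</groupId>
    <artifactId>protobuf-java</artifactId>
    <version>2.5.0</version> <!-- placeholder; match the version used to generate the stubs -->
  </dependency>
  <dependency>
    <groupId>org.apache.hbase</groupId>
    <artifactId>hbase-protocol</artifactId>
    <scope>provided</scope>
  </dependency>
</dependencies>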

View File

@@ -1,256 +0,0 @@
<?xml version="1.0"?>
<project xmlns="https://maven.apache.org/POM/4.0.0" xmlns:xsi="https://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase-build-configuration</artifactId>
<groupId>org.apache.hbase</groupId>
<version>3.0.0-SNAPSHOT</version>
<relativePath>../hbase-build-configuration</relativePath>
</parent>
<artifactId>hbase-rsgroup</artifactId>
<name>Apache HBase - RSGroup</name>
<description>Regionserver Groups for HBase</description>
<build>
<plugins>
<plugin>
<!--Make it so assembly:single does nothing in here-->
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<skipAssembly>true</skipAssembly>
</configuration>
</plugin>
<!-- Make a jar and put the sources in the jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<configuration>
<failOnViolation>true</failOnViolation>
</configuration>
</plugin>
<plugin>
<groupId>net.revelc.code</groupId>
<artifactId>warbucks-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
<dependencies>
<!-- Intra-project dependencies -->
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-annotations</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-procedure</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol-shaded</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-testing-util</artifactId>
<scope>test</scope>
</dependency>
<!-- General dependencies -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase.thirdparty</groupId>
<artifactId>hbase-shaded-miscellaneous</artifactId>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<profiles>
<!-- Skip the tests in this module -->
<profile>
<id>skipRSGroupTests</id>
<activation>
<property>
<name>skipRSGroupTests</name>
</property>
</activation>
<properties>
<surefire.skipFirstPart>true</surefire.skipFirstPart>
<surefire.skipSecondPart>true</surefire.skipSecondPart>
</properties>
</profile>
<!-- profile against Hadoop 2.x: This is the default. -->
<profile>
<id>hadoop-2.0</id>
<activation>
<property>
<!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
<!--h2-->
<name>!hadoop.profile</name>
</property>
</activation>
<dependencies>
<dependency>
<groupId>com.github.stephenc.findbugs</groupId>
<artifactId>findbugs-annotations</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-compiler</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-runtime</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.code.findbugs</groupId>
<artifactId>jsr305</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
</profile>
<!--
profile for building against Hadoop 3.0.x. Activate using:
mvn -Dhadoop.profile=3.0
-->
<profile>
<id>hadoop-3.0</id>
<activation>
<property>
<name>hadoop.profile</name>
<value>3.0</value>
</property>
</activation>
<properties>
<hadoop.version>3.0-SNAPSHOT</hadoop.version>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
</dependency>
</dependencies>
</profile>
<profile>
<id>eclipse-specific</id>
<activation>
<property>
<name>m2e.version</name>
</property>
</activation>
<build>
<pluginManagement>
<plugins>
<!--This plugin's configuration is used to store Eclipse m2e settings
only. It has no influence on the Maven build itself.-->
<plugin>
<groupId>org.eclipse.m2e</groupId>
<artifactId>lifecycle-mapping</artifactId>
<configuration>
<lifecycleMappingMetadata>
<pluginExecutions>
</pluginExecutions>
</lifecycleMappingMetadata>
</configuration>
</plugin>
</plugins>
</pluginManagement>
</build>
</profile>
</profiles>
</project>

View File

@@ -1,559 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import com.google.protobuf.RpcCallback;
import com.google.protobuf.RpcController;
import com.google.protobuf.Service;
import java.io.IOException;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
import org.apache.hadoop.hbase.coprocessor.HasMasterServices;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
// TODO: Encapsulate MasterObserver functions into separate subclass.
@CoreCoprocessor
@InterfaceAudience.Private
public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
private static final Logger LOG = LoggerFactory.getLogger(RSGroupAdminEndpoint.class);
private MasterServices master = null;
// Only instance of RSGroupInfoManager. RSGroup aware load balancers ask for this instance on
// their setup.
private RSGroupInfoManager groupInfoManager;
private RSGroupAdminServer groupAdminServer;
private final RSGroupAdminService groupAdminService = new RSGroupAdminServiceImpl();
private AccessChecker accessChecker;
/** Provider for mapping principal names to Users */
private UserProvider userProvider;
@Override
public void start(CoprocessorEnvironment env) throws IOException {
if (!(env instanceof HasMasterServices)) {
throw new IOException("Does not implement HMasterServices");
}
master = ((HasMasterServices)env).getMasterServices();
groupInfoManager = RSGroupInfoManagerImpl.getInstance(master);
groupAdminServer = new RSGroupAdminServer(master, groupInfoManager);
Class<?> clazz =
master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null);
if (!RSGroupableBalancer.class.isAssignableFrom(clazz)) {
throw new IOException("Configured balancer does not support RegionServer groups.");
}
accessChecker = ((HasMasterServices) env).getMasterServices().getAccessChecker();
// set the user-provider.
this.userProvider = UserProvider.instantiate(env.getConfiguration());
}
@Override
public void stop(CoprocessorEnvironment env) {
}
@Override
public Iterable<Service> getServices() {
return Collections.singleton(groupAdminService);
}
@Override
public Optional<MasterObserver> getMasterObserver() {
return Optional.of(this);
}
RSGroupInfoManager getGroupInfoManager() {
return groupInfoManager;
}
/**
* Implementation of RSGroupAdminService defined in RSGroupAdmin.proto.
* This class calls {@link RSGroupAdminServer} for actual work, converts result to protocol
* buffer response, handles exceptions if any occurred and then calls the {@code RpcCallback} with
* the response.
*/
private class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
@Override
public void getRSGroupInfo(RpcController controller,
GetRSGroupInfoRequest request, RpcCallback<GetRSGroupInfoResponse> done) {
GetRSGroupInfoResponse.Builder builder = GetRSGroupInfoResponse.newBuilder();
String groupName = request.getRSGroupName();
LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, group="
+ groupName);
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preGetRSGroupInfo(groupName);
}
checkPermission("getRSGroupInfo");
RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
if (rsGroupInfo != null) {
builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo));
}
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postGetRSGroupInfo(groupName);
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void getRSGroupInfoOfTable(RpcController controller,
GetRSGroupInfoOfTableRequest request, RpcCallback<GetRSGroupInfoOfTableResponse> done) {
GetRSGroupInfoOfTableResponse.Builder builder = GetRSGroupInfoOfTableResponse.newBuilder();
TableName tableName = ProtobufUtil.toTableName(request.getTableName());
LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table="
+ tableName);
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preGetRSGroupInfoOfTable(tableName);
}
checkPermission("getRSGroupInfoOfTable");
RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupInfoOfTable(tableName);
if (RSGroupInfo != null) {
builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(RSGroupInfo));
}
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postGetRSGroupInfoOfTable(tableName);
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void moveServers(RpcController controller, MoveServersRequest request,
RpcCallback<MoveServersResponse> done) {
MoveServersResponse.Builder builder = MoveServersResponse.newBuilder();
Set<Address> hostPorts = Sets.newHashSet();
for (HBaseProtos.ServerName el : request.getServersList()) {
hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
}
LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts +" to rsgroup "
+ request.getTargetGroup());
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup());
}
checkPermission("moveServers");
groupAdminServer.moveServers(hostPorts, request.getTargetGroup());
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postMoveServers(hostPorts, request.getTargetGroup());
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void moveTables(RpcController controller, MoveTablesRequest request,
RpcCallback<MoveTablesResponse> done) {
MoveTablesResponse.Builder builder = MoveTablesResponse.newBuilder();
Set<TableName> tables = new HashSet<>(request.getTableNameList().size());
for (HBaseProtos.TableName tableName : request.getTableNameList()) {
tables.add(ProtobufUtil.toTableName(tableName));
}
LOG.info(master.getClientIdAuditPrefix() + " move tables " + tables +" to rsgroup "
+ request.getTargetGroup());
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preMoveTables(tables, request.getTargetGroup());
}
checkPermission("moveTables");
groupAdminServer.moveTables(tables, request.getTargetGroup());
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postMoveTables(tables, request.getTargetGroup());
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void addRSGroup(RpcController controller, AddRSGroupRequest request,
RpcCallback<AddRSGroupResponse> done) {
AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder();
LOG.info(master.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName());
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preAddRSGroup(request.getRSGroupName());
}
checkPermission("addRSGroup");
groupAdminServer.addRSGroup(request.getRSGroupName());
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postAddRSGroup(request.getRSGroupName());
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void removeRSGroup(RpcController controller,
RemoveRSGroupRequest request, RpcCallback<RemoveRSGroupResponse> done) {
RemoveRSGroupResponse.Builder builder =
RemoveRSGroupResponse.newBuilder();
LOG.info(master.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName());
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preRemoveRSGroup(request.getRSGroupName());
}
checkPermission("removeRSGroup");
groupAdminServer.removeRSGroup(request.getRSGroupName());
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postRemoveRSGroup(request.getRSGroupName());
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void balanceRSGroup(RpcController controller,
BalanceRSGroupRequest request, RpcCallback<BalanceRSGroupResponse> done) {
BalanceRSGroupResponse.Builder builder = BalanceRSGroupResponse.newBuilder();
LOG.info(master.getClientIdAuditPrefix() + " balance rsgroup, group=" +
request.getRSGroupName());
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preBalanceRSGroup(request.getRSGroupName());
}
checkPermission("balanceRSGroup");
boolean balancerRan = groupAdminServer.balanceRSGroup(request.getRSGroupName());
builder.setBalanceRan(balancerRan);
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postBalanceRSGroup(request.getRSGroupName(),
balancerRan);
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
builder.setBalanceRan(false);
}
done.run(builder.build());
}
@Override
public void listRSGroupInfos(RpcController controller,
ListRSGroupInfosRequest request, RpcCallback<ListRSGroupInfosResponse> done) {
ListRSGroupInfosResponse.Builder builder = ListRSGroupInfosResponse.newBuilder();
LOG.info(master.getClientIdAuditPrefix() + " list rsgroup");
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preListRSGroups();
}
checkPermission("listRSGroup");
for (RSGroupInfo RSGroupInfo : groupAdminServer.listRSGroups()) {
builder.addRSGroupInfo(ProtobufUtil.toProtoGroupInfo(RSGroupInfo));
}
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postListRSGroups();
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void getRSGroupInfoOfServer(RpcController controller,
GetRSGroupInfoOfServerRequest request, RpcCallback<GetRSGroupInfoOfServerResponse> done) {
GetRSGroupInfoOfServerResponse.Builder builder = GetRSGroupInfoOfServerResponse.newBuilder();
Address hp = Address.fromParts(request.getServer().getHostName(),
request.getServer().getPort());
LOG.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server="
+ hp);
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preGetRSGroupInfoOfServer(hp);
}
checkPermission("getRSGroupInfoOfServer");
RSGroupInfo info = groupAdminServer.getRSGroupOfServer(hp);
if (info != null) {
builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(info));
}
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postGetRSGroupInfoOfServer(hp);
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void moveServersAndTables(RpcController controller,
MoveServersAndTablesRequest request, RpcCallback<MoveServersAndTablesResponse> done) {
MoveServersAndTablesResponse.Builder builder = MoveServersAndTablesResponse.newBuilder();
Set<Address> hostPorts = Sets.newHashSet();
for (HBaseProtos.ServerName el : request.getServersList()) {
hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
}
Set<TableName> tables = new HashSet<>(request.getTableNameList().size());
for (HBaseProtos.TableName tableName : request.getTableNameList()) {
tables.add(ProtobufUtil.toTableName(tableName));
}
LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts
+ " and tables " + tables + " to rsgroup" + request.getTargetGroup());
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preMoveServersAndTables(hostPorts, tables,
request.getTargetGroup());
}
checkPermission("moveServersAndTables");
groupAdminServer.moveServersAndTables(hostPorts, tables, request.getTargetGroup());
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postMoveServersAndTables(hostPorts, tables,
request.getTargetGroup());
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void removeServers(RpcController controller,
RemoveServersRequest request,
RpcCallback<RemoveServersResponse> done) {
RemoveServersResponse.Builder builder =
RemoveServersResponse.newBuilder();
Set<Address> servers = Sets.newHashSet();
for (HBaseProtos.ServerName el : request.getServersList()) {
servers.add(Address.fromParts(el.getHostName(), el.getPort()));
}
LOG.info(master.getClientIdAuditPrefix()
+ " remove decommissioned servers from rsgroup: " + servers);
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preRemoveServers(servers);
}
checkPermission("removeServers");
groupAdminServer.removeServers(servers);
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postRemoveServers(servers);
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
}
boolean rsgroupHasServersOnline(TableDescriptor desc) throws IOException {
String groupName;
try {
groupName =
master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString())
.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
if (groupName == null) {
groupName = RSGroupInfo.DEFAULT_GROUP;
}
} catch (MasterNotRunningException | PleaseHoldException e) {
LOG.info("Master has not initialized yet; temporarily using default RSGroup '" +
RSGroupInfo.DEFAULT_GROUP + "' for deploy of system table");
groupName = RSGroupInfo.DEFAULT_GROUP;
}
RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
if (rsGroupInfo == null) {
throw new ConstraintException(
"Default RSGroup (" + groupName + ") for this table's " + "namespace does not exist.");
}
for (ServerName onlineServer : master.getServerManager().createDestinationServersList()) {
if (rsGroupInfo.getServers().contains(onlineServer.getAddress())) {
return true;
}
}
return false;
}
void assignTableToGroup(TableDescriptor desc) throws IOException {
String groupName =
master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString())
.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
if (groupName == null) {
groupName = RSGroupInfo.DEFAULT_GROUP;
}
RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
if (rsGroupInfo == null) {
throw new ConstraintException("Default RSGroup (" + groupName + ") for this table's "
+ "namespace does not exist.");
}
if (!rsGroupInfo.containsTable(desc.getTableName())) {
LOG.debug("Pre-moving table " + desc.getTableName() + " to RSGroup " + groupName);
groupAdminServer.moveTables(Sets.newHashSet(desc.getTableName()), groupName);
}
}
/////////////////////////////////////////////////////////////////////////////
// MasterObserver overrides
/////////////////////////////////////////////////////////////////////////////
@Override
public void preCreateTableAction(
final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableDescriptor desc,
final RegionInfo[] regions) throws IOException {
if (!desc.getTableName().isSystemTable() && !rsgroupHasServersOnline(desc)) {
throw new HBaseIOException("No online servers in the rsgroup, which table " +
desc.getTableName().getNameAsString() + " belongs to");
}
}
// Assign table to default RSGroup.
@Override
public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableDescriptor desc, RegionInfo[] regions) throws IOException {
assignTableToGroup(desc);
}
// Remove table from its RSGroup.
@Override
public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) throws IOException {
try {
RSGroupInfo group = groupAdminServer.getRSGroupInfoOfTable(tableName);
if (group != null) {
LOG.debug(String.format("Removing deleted table '%s' from rsgroup '%s'", tableName,
group.getName()));
groupAdminServer.moveTables(Sets.newHashSet(tableName), null);
}
} catch (IOException ex) {
LOG.debug("Failed to perform RSGroup information cleanup for table: " + tableName, ex);
}
}
@Override
public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns) throws IOException {
String group = ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
if(group != null && groupAdminServer.getRSGroupInfo(group) == null) {
throw new ConstraintException("Region server group "+group+" does not exit");
}
}
@Override
public void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor currentNsDesc, NamespaceDescriptor newNsDesc) throws IOException {
preCreateNamespace(ctx, newNsDesc);
}
@Override
public void preCloneSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
SnapshotDescription snapshot, TableDescriptor desc) throws IOException {
assignTableToGroup(desc);
}
@Override
public void postClearDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ServerName> servers, List<ServerName> notClearedServers)
throws IOException {
Set<Address> clearedServer = servers.stream().
filter(server -> !notClearedServers.contains(server)).
map(ServerName::getAddress).
collect(Collectors.toSet());
if(!clearedServer.isEmpty()) {
groupAdminServer.removeServers(clearedServer);
}
}
public void checkPermission(String request) throws IOException {
accessChecker.requirePermission(getActiveUser(), request, null, Action.ADMIN);
}
/**
* Returns the active user to which authorization checks should be applied.
* If we are in the context of an RPC call, the remote user is used,
* otherwise the currently logged in user is used.
*/
private User getActiveUser() throws IOException {
// for non-rpc handling, fallback to system user
Optional<User> optionalUser = RpcServer.getRequestUser();
if (optionalUser.isPresent()) {
return optionalUser.get();
}
return userProvider.getCurrent();
}
}

View File

@@ -1,32 +0,0 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-->
<configuration>
<property>
<name>hbase.defaults.for.version.skip</name>
<value>true</value>
</property>
<property>
<name>hbase.hconnection.threads.keepalivetime</name>
<value>3</value>
</property>
</configuration>

View File

@@ -1,68 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Define some default values that can be overridden by system properties
hbase.root.logger=INFO,console
hbase.log.dir=.
hbase.log.file=hbase.log
# Define the root logger to the system property "hbase.root.logger".
log4j.rootLogger=${hbase.root.logger}
# Logging Threshold
log4j.threshold=ALL
#
# Daily Rolling File Appender
#
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
# Rollover at midnight
log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
# 30-day backup
#log4j.appender.DRFA.MaxBackupIndex=30
log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
# Debugging Pattern format
log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n
#
# console
# Add "console" to rootlogger above if you want to use this
#
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n
# Custom Logging levels
#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
log4j.logger.org.apache.hadoop=WARN
log4j.logger.org.apache.zookeeper=ERROR
log4j.logger.org.apache.hadoop.hbase=DEBUG
#These settings are workarounds against spurious logs from the minicluster.
#See HBASE-4709
log4j.logger.org.apache.hadoop.metrics2.impl.MetricsConfig=WARN
log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSinkAdapter=WARN
log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=WARN
log4j.logger.org.apache.hadoop.metrics2.util.MBeans=WARN
# Enable this to get detailed connection error/retry logging.
# log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=TRACE

View File

@@ -0,0 +1,226 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import com.google.protobuf.Service;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
import org.apache.hadoop.hbase.coprocessor.HasMasterServices;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
// TODO: Encapsulate MasterObserver functions into separate subclass.
@CoreCoprocessor
@InterfaceAudience.Private
public class RSGroupAdminEndpoint implements MasterCoprocessor, MasterObserver {
static final Logger LOG = LoggerFactory.getLogger(RSGroupAdminEndpoint.class);
private MasterServices master;
// Only instance of RSGroupInfoManager. RSGroup aware load balancers ask for this instance on
// their setup.
private RSGroupInfoManager groupInfoManager;
private RSGroupAdminServer groupAdminServer;
private RSGroupAdminServiceImpl groupAdminService = new RSGroupAdminServiceImpl();
@Override
public void start(CoprocessorEnvironment env) throws IOException {
if (!(env instanceof HasMasterServices)) {
throw new IOException("Does not implement HMasterServices");
}
master = ((HasMasterServices) env).getMasterServices();
groupInfoManager = RSGroupInfoManagerImpl.getInstance(master);
groupAdminServer = new RSGroupAdminServer(master, groupInfoManager);
Class<?> clazz =
master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null);
if (!RSGroupableBalancer.class.isAssignableFrom(clazz)) {
throw new IOException("Configured balancer does not support RegionServer groups.");
}
AccessChecker accessChecker = ((HasMasterServices) env).getMasterServices().getAccessChecker();
// set the user-provider.
UserProvider userProvider = UserProvider.instantiate(env.getConfiguration());
groupAdminService.initialize(master, groupAdminServer, accessChecker, userProvider);
}
@Override
public void stop(CoprocessorEnvironment env) {
}
@Override
public Iterable<Service> getServices() {
return Collections.singleton(groupAdminService);
}
@Override
public Optional<MasterObserver> getMasterObserver() {
return Optional.of(this);
}
RSGroupInfoManager getGroupInfoManager() {
return groupInfoManager;
}
@VisibleForTesting
RSGroupAdminServiceImpl getGroupAdminService() {
return groupAdminService;
}
private void assignTableToGroup(TableDescriptor desc) throws IOException {
String groupName =
master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString())
.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
if (groupName == null) {
groupName = RSGroupInfo.DEFAULT_GROUP;
}
RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
if (rsGroupInfo == null) {
throw new ConstraintException(
"Default RSGroup (" + groupName + ") for this table's namespace does not exist.");
}
if (!rsGroupInfo.containsTable(desc.getTableName())) {
LOG.debug("Pre-moving table " + desc.getTableName() + " to RSGroup " + groupName);
groupAdminServer.moveTables(Sets.newHashSet(desc.getTableName()), groupName);
}
}
/////////////////////////////////////////////////////////////////////////////
// MasterObserver overrides
/////////////////////////////////////////////////////////////////////////////
private boolean rsgroupHasServersOnline(TableDescriptor desc) throws IOException {
String groupName;
try {
groupName = master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString())
.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
if (groupName == null) {
groupName = RSGroupInfo.DEFAULT_GROUP;
}
} catch (MasterNotRunningException | PleaseHoldException e) {
LOG.info("Master has not initialized yet; temporarily using default RSGroup '" +
RSGroupInfo.DEFAULT_GROUP + "' for deploy of system table");
groupName = RSGroupInfo.DEFAULT_GROUP;
}
RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
if (rsGroupInfo == null) {
throw new ConstraintException(
"Default RSGroup (" + groupName + ") for this table's " + "namespace does not exist.");
}
for (ServerName onlineServer : master.getServerManager().createDestinationServersList()) {
if (rsGroupInfo.getServers().contains(onlineServer.getAddress())) {
return true;
}
}
return false;
}
@Override
public void preCreateTableAction(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableDescriptor desc, final RegionInfo[] regions) throws IOException {
if (!desc.getTableName().isSystemTable() && !rsgroupHasServersOnline(desc)) {
throw new HBaseIOException("No online servers in the rsgroup, which table " +
desc.getTableName().getNameAsString() + " belongs to");
}
}
// Assign table to default RSGroup.
@Override
public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableDescriptor desc, RegionInfo[] regions) throws IOException {
assignTableToGroup(desc);
}
// Remove table from its RSGroup.
@Override
public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) throws IOException {
try {
RSGroupInfo group = groupAdminServer.getRSGroupInfoOfTable(tableName);
if (group != null) {
LOG.debug(String.format("Removing deleted table '%s' from rsgroup '%s'", tableName,
group.getName()));
groupAdminServer.moveTables(Sets.newHashSet(tableName), null);
}
} catch (IOException ex) {
LOG.debug("Failed to perform RSGroup information cleanup for table: " + tableName, ex);
}
}
@Override
public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns) throws IOException {
String group = ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP);
if (group != null && groupAdminServer.getRSGroupInfo(group) == null) {
throw new ConstraintException("Region server group " + group + " does not exit");
}
}
@Override
public void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor currentNsDesc, NamespaceDescriptor newNsDesc) throws IOException {
preCreateNamespace(ctx, newNsDesc);
}
@Override
public void preCloneSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
SnapshotDescription snapshot, TableDescriptor desc) throws IOException {
assignTableToGroup(desc);
}
@Override
public void postClearDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ServerName> servers, List<ServerName> notClearedServers) throws IOException {
Set<Address> clearedServer =
servers.stream().filter(server -> !notClearedServers.contains(server))
.map(ServerName::getAddress).collect(Collectors.toSet());
if (!clearedServer.isEmpty()) {
groupAdminServer.removeServers(clearedServer);
}
}
}
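(For context, a hedged configuration sketch rather than part of this commit: start() above refuses to load unless the configured master balancer implements RSGroupableBalancer, and the endpoint itself has to be registered as a master coprocessor. With the stock group-aware balancer, an hbase-site.xml enabling the feature would look roughly like the following; hbase.master.loadbalancer.class is the key behind HConstants.HBASE_MASTER_LOADBALANCER_CLASS, and hbase.coprocessor.master.classes is the usual master coprocessor list.)
<configuration>
  <!-- Balancer that understands RegionServer groups; required by RSGroupAdminEndpoint.start() -->
  <property>
    <name>hbase.master.loadbalancer.class</name>
    <value>org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer</value>
  </property>
  <!-- Register the rsgroup admin endpoint on the active master -->
  <property>
    <name>hbase.coprocessor.master.classes</name>
    <value>org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint</value>
  </property>
</configuration>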

View File

@@ -0,0 +1,378 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import com.google.protobuf.RpcCallback;
import com.google.protobuf.RpcController;
import java.io.IOException;
import java.util.HashSet;
import java.util.Optional;
import java.util.Set;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
/**
* Implementation of RSGroupAdminService defined in RSGroupAdmin.proto. This class calls
* {@link RSGroupAdminServer} for actual work, converts result to protocol buffer response, handles
* exceptions if any occurred and then calls the {@code RpcCallback} with the response.
*/
class RSGroupAdminServiceImpl extends RSGroupAdminProtos.RSGroupAdminService {
private MasterServices master;
private RSGroupAdminServer groupAdminServer;
private AccessChecker accessChecker;
/** Provider for mapping principal names to Users */
private UserProvider userProvider;
RSGroupAdminServiceImpl() {
}
void initialize(MasterServices master, RSGroupAdminServer groupAdminServer,
AccessChecker accessChecker, UserProvider userProvider) {
this.master = master;
this.groupAdminServer = groupAdminServer;
this.accessChecker = accessChecker;
this.userProvider = userProvider;
}
@VisibleForTesting
void checkPermission(String request) throws IOException {
accessChecker.requirePermission(getActiveUser(), request, null, Action.ADMIN);
}
/**
* Returns the active user to which authorization checks should be applied. If we are in the
* context of an RPC call, the remote user is used, otherwise the currently logged in user is
* used.
*/
private User getActiveUser() throws IOException {
// for non-rpc handling, fallback to system user
Optional<User> optionalUser = RpcServer.getRequestUser();
if (optionalUser.isPresent()) {
return optionalUser.get();
}
return userProvider.getCurrent();
}
@Override
public void getRSGroupInfo(RpcController controller, GetRSGroupInfoRequest request,
RpcCallback<GetRSGroupInfoResponse> done) {
GetRSGroupInfoResponse.Builder builder = GetRSGroupInfoResponse.newBuilder();
String groupName = request.getRSGroupName();
RSGroupAdminEndpoint.LOG.info(
master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, group=" + groupName);
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preGetRSGroupInfo(groupName);
}
checkPermission("getRSGroupInfo");
RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName);
if (rsGroupInfo != null) {
builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(rsGroupInfo));
}
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postGetRSGroupInfo(groupName);
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void getRSGroupInfoOfTable(RpcController controller, GetRSGroupInfoOfTableRequest request,
RpcCallback<GetRSGroupInfoOfTableResponse> done) {
GetRSGroupInfoOfTableResponse.Builder builder = GetRSGroupInfoOfTableResponse.newBuilder();
TableName tableName = ProtobufUtil.toTableName(request.getTableName());
RSGroupAdminEndpoint.LOG.info(
master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, table=" + tableName);
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preGetRSGroupInfoOfTable(tableName);
}
checkPermission("getRSGroupInfoOfTable");
RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupInfoOfTable(tableName);
if (RSGroupInfo != null) {
builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(RSGroupInfo));
}
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postGetRSGroupInfoOfTable(tableName);
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void moveServers(RpcController controller, MoveServersRequest request,
RpcCallback<MoveServersResponse> done) {
MoveServersResponse.Builder builder = MoveServersResponse.newBuilder();
Set<Address> hostPorts = Sets.newHashSet();
for (HBaseProtos.ServerName el : request.getServersList()) {
hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
}
RSGroupAdminEndpoint.LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts +
" to rsgroup " + request.getTargetGroup());
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preMoveServers(hostPorts, request.getTargetGroup());
}
checkPermission("moveServers");
groupAdminServer.moveServers(hostPorts, request.getTargetGroup());
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postMoveServers(hostPorts, request.getTargetGroup());
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void moveTables(RpcController controller, MoveTablesRequest request,
RpcCallback<MoveTablesResponse> done) {
MoveTablesResponse.Builder builder = MoveTablesResponse.newBuilder();
Set<TableName> tables = new HashSet<>(request.getTableNameList().size());
for (HBaseProtos.TableName tableName : request.getTableNameList()) {
tables.add(ProtobufUtil.toTableName(tableName));
}
RSGroupAdminEndpoint.LOG.info(master.getClientIdAuditPrefix() + " move tables " + tables +
" to rsgroup " + request.getTargetGroup());
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preMoveTables(tables, request.getTargetGroup());
}
checkPermission("moveTables");
groupAdminServer.moveTables(tables, request.getTargetGroup());
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postMoveTables(tables, request.getTargetGroup());
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void addRSGroup(RpcController controller, AddRSGroupRequest request,
RpcCallback<AddRSGroupResponse> done) {
AddRSGroupResponse.Builder builder = AddRSGroupResponse.newBuilder();
RSGroupAdminEndpoint.LOG
.info(master.getClientIdAuditPrefix() + " add rsgroup " + request.getRSGroupName());
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preAddRSGroup(request.getRSGroupName());
}
checkPermission("addRSGroup");
groupAdminServer.addRSGroup(request.getRSGroupName());
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postAddRSGroup(request.getRSGroupName());
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void removeRSGroup(RpcController controller, RemoveRSGroupRequest request,
RpcCallback<RemoveRSGroupResponse> done) {
RemoveRSGroupResponse.Builder builder = RemoveRSGroupResponse.newBuilder();
RSGroupAdminEndpoint.LOG
.info(master.getClientIdAuditPrefix() + " remove rsgroup " + request.getRSGroupName());
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preRemoveRSGroup(request.getRSGroupName());
}
checkPermission("removeRSGroup");
groupAdminServer.removeRSGroup(request.getRSGroupName());
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postRemoveRSGroup(request.getRSGroupName());
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void balanceRSGroup(RpcController controller, BalanceRSGroupRequest request,
RpcCallback<BalanceRSGroupResponse> done) {
BalanceRSGroupResponse.Builder builder = BalanceRSGroupResponse.newBuilder();
RSGroupAdminEndpoint.LOG.info(
master.getClientIdAuditPrefix() + " balance rsgroup, group=" + request.getRSGroupName());
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preBalanceRSGroup(request.getRSGroupName());
}
checkPermission("balanceRSGroup");
boolean balancerRan = groupAdminServer.balanceRSGroup(request.getRSGroupName());
builder.setBalanceRan(balancerRan);
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postBalanceRSGroup(request.getRSGroupName(), balancerRan);
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
builder.setBalanceRan(false);
}
done.run(builder.build());
}
@Override
public void listRSGroupInfos(RpcController controller, ListRSGroupInfosRequest request,
RpcCallback<ListRSGroupInfosResponse> done) {
ListRSGroupInfosResponse.Builder builder = ListRSGroupInfosResponse.newBuilder();
RSGroupAdminEndpoint.LOG.info(master.getClientIdAuditPrefix() + " list rsgroup");
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preListRSGroups();
}
checkPermission("listRSGroup");
for (RSGroupInfo RSGroupInfo : groupAdminServer.listRSGroups()) {
builder.addRSGroupInfo(ProtobufUtil.toProtoGroupInfo(RSGroupInfo));
}
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postListRSGroups();
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void getRSGroupInfoOfServer(RpcController controller,
GetRSGroupInfoOfServerRequest request, RpcCallback<GetRSGroupInfoOfServerResponse> done) {
GetRSGroupInfoOfServerResponse.Builder builder = GetRSGroupInfoOfServerResponse.newBuilder();
Address hp =
Address.fromParts(request.getServer().getHostName(), request.getServer().getPort());
RSGroupAdminEndpoint.LOG
.info(master.getClientIdAuditPrefix() + " initiates rsgroup info retrieval, server=" + hp);
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preGetRSGroupInfoOfServer(hp);
}
checkPermission("getRSGroupInfoOfServer");
RSGroupInfo info = groupAdminServer.getRSGroupOfServer(hp);
if (info != null) {
builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(info));
}
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postGetRSGroupInfoOfServer(hp);
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void moveServersAndTables(RpcController controller, MoveServersAndTablesRequest request,
RpcCallback<MoveServersAndTablesResponse> done) {
MoveServersAndTablesResponse.Builder builder = MoveServersAndTablesResponse.newBuilder();
Set<Address> hostPorts = Sets.newHashSet();
for (HBaseProtos.ServerName el : request.getServersList()) {
hostPorts.add(Address.fromParts(el.getHostName(), el.getPort()));
}
Set<TableName> tables = new HashSet<>(request.getTableNameList().size());
for (HBaseProtos.TableName tableName : request.getTableNameList()) {
tables.add(ProtobufUtil.toTableName(tableName));
}
RSGroupAdminEndpoint.LOG.info(master.getClientIdAuditPrefix() + " move servers " + hostPorts +
" and tables " + tables + " to rsgroup" + request.getTargetGroup());
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preMoveServersAndTables(hostPorts, tables,
request.getTargetGroup());
}
checkPermission("moveServersAndTables");
groupAdminServer.moveServersAndTables(hostPorts, tables, request.getTargetGroup());
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postMoveServersAndTables(hostPorts, tables,
request.getTargetGroup());
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void removeServers(RpcController controller, RemoveServersRequest request,
RpcCallback<RemoveServersResponse> done) {
RemoveServersResponse.Builder builder = RemoveServersResponse.newBuilder();
Set<Address> servers = Sets.newHashSet();
for (HBaseProtos.ServerName el : request.getServersList()) {
servers.add(Address.fromParts(el.getHostName(), el.getPort()));
}
RSGroupAdminEndpoint.LOG.info(
master.getClientIdAuditPrefix() + " remove decommissioned servers from rsgroup: " + servers);
try {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preRemoveServers(servers);
}
checkPermission("removeServers");
groupAdminServer.removeServers(servers);
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postRemoveServers(servers);
}
} catch (IOException e) {
CoprocessorRpcUtils.setControllerException(controller, e);
}
done.run(builder.build());
}
}
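
Every RPC above follows the same shape: run the master coprocessor pre-hook if a host is installed, check the caller's permission for the named operation, delegate to groupAdminServer, run the post-hook, and fold any IOException into the controller before answering through done.run(builder.build()). For orientation, a minimal client-side sketch that drives a few of these endpoints follows. It is only a sketch: it assumes the RSGroupAdminClient helper from the same rsgroup package is on the classpath and that a cluster is reachable from the default configuration, and the group name and server address are illustrative rather than part of this commit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdminClient;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;

public class RSGroupAdminExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      RSGroupAdminClient groups = new RSGroupAdminClient(conn);
      // Served by addRSGroup(...) above: pre-hook, "addRSGroup" permission check, add, post-hook.
      groups.addRSGroup("app_group");
      // Served by listRSGroupInfos(...): lists every defined group, including the default one.
      for (RSGroupInfo info : groups.listRSGroups()) {
        System.out.println(info.getName());
      }
      // Served by getRSGroupInfoOfServer(...); the address here is purely illustrative.
      RSGroupInfo ofServer =
        groups.getRSGroupOfServer(Address.fromParts("rs1.example.com", 16020));
      System.out.println(ofServer == null ? "server not in any group" : ofServer.getName());
      // Served by balanceRSGroup(...): the response reports whether the balancer actually ran.
      System.out.println("balancer ran: " + groups.balanceRSGroup("app_group"));
      // Served by removeRSGroup(...): succeeds only while the group holds no servers or tables.
      groups.removeRSGroup("app_group");
    }
  }
}

Each call maps one-to-one onto an RPC implemented above, so the master coprocessor pre/post hooks and the permission checks shown in this class apply to it as well.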

View File

@ -15,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import java.io.IOException;
@ -56,18 +55,17 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
/**
* GroupBasedLoadBalancer, used when Region Server Grouping is configured (HBase-6721)
* It does region balance based on a table's group membership.
*
* Most assignment methods contain two exclusive code paths: Online - when the group
* table is online and Offline - when it is unavailable.
*
* During Offline, assignments are assigned based on cached information in zookeeper.
* If unavailable (ie bootstrap) then regions are assigned randomly.
*
* Once the GROUP table has been assigned, the balancer switches to Online and will then
* start providing appropriate assignments for user tables.
*
* GroupBasedLoadBalancer, used when Region Server Grouping is configured (HBase-6721) It does
* region balance based on a table's group membership.
* <p/>
* Most assignment methods contain two exclusive code paths: Online - when the group table is online
* and Offline - when it is unavailable.
* <p/>
* During Offline, assignments are assigned based on cached information in zookeeper. If unavailable
* (ie bootstrap) then regions are assigned randomly.
* <p/>
* Once the GROUP table has been assigned, the balancer switches to Online and will then start
* providing appropriate assignments for user tables.
*/
@InterfaceAudience.Private
public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
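
To make the Online path described in the class comment reachable, the master has to be pointed at this balancer and at the rsgroup admin endpoint. The following sketch builds such a Configuration programmatically, mirroring what configureRSGroupAdminEndpoint in the ACL test further down does; in a real deployment the same two keys belong in hbase-site.xml. The class names and configuration keys come from this commit, while the wrapper class and method around them are assumed for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint;
import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;

public class RSGroupConfigSketch {
  public static Configuration rsGroupEnabledConf() {
    Configuration conf = HBaseConfiguration.create();
    // Let the master balance regions per group instead of cluster-wide.
    conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
      RSGroupBasedLoadBalancer.class.getName());
    // Load the endpoint so the master serves the rsgroup admin RPCs shown earlier.
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      RSGroupAdminEndpoint.class.getName());
    return conf;
  }
}

Note that the test appends the endpoint to any master coprocessors already configured instead of overwriting the key, as configureRSGroupAdminEndpoint below shows; a production configuration should do the same.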

View File

@ -39,7 +39,6 @@ import org.apache.hadoop.hbase.security.access.SecureTestUtil;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.SecurityTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
@ -49,8 +48,8 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Performs authorization checks for rsgroup operations, according to different
* levels of authorized users.
* Performs authorization checks for rsgroup operations, according to different levels of authorized
* users.
*/
@Category({ SecurityTests.class, MediumTests.class })
public class TestRSGroupsWithACL extends SecureTestUtil {
@ -98,8 +97,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
public static void setupBeforeClass() throws Exception {
// setup configuration
conf = TEST_UTIL.getConfiguration();
conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
RSGroupBasedLoadBalancer.class.getName());
conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, RSGroupBasedLoadBalancer.class.getName());
// Enable security
enableSecurity(conf);
// Verify enableSecurity sets up what we require
@ -108,8 +106,8 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
configureRSGroupAdminEndpoint(conf);
TEST_UTIL.startMiniCluster();
rsGroupAdminEndpoint = (RSGroupAdminEndpoint) TEST_UTIL.getMiniHBaseCluster().getMaster().
getMasterCoprocessorHost().findCoprocessor(RSGroupAdminEndpoint.class.getName());
rsGroupAdminEndpoint = (RSGroupAdminEndpoint) TEST_UTIL.getMiniHBaseCluster().getMaster()
.getMasterCoprocessorHost().findCoprocessor(RSGroupAdminEndpoint.class.getName());
// Wait for the ACL table to become available
TEST_UTIL.waitUntilAllRegionsAssigned(PermissionStorage.ACL_TABLE_NAME);
@ -141,30 +139,20 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
cfd.setMaxVersions(100);
tableBuilder.setColumnFamily(cfd.build());
tableBuilder.setValue(TableDescriptorBuilder.OWNER, USER_OWNER.getShortName());
createTable(TEST_UTIL, tableBuilder.build(),
new byte[][] { Bytes.toBytes("s") });
createTable(TEST_UTIL, tableBuilder.build(), new byte[][] { Bytes.toBytes("s") });
// Set up initial grants
grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(),
Permission.Action.ADMIN,
Permission.Action.CREATE,
Permission.Action.READ,
Permission.Action.WRITE);
grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(), Permission.Action.ADMIN,
Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE);
grantOnTable(TEST_UTIL, USER_RW.getShortName(),
TEST_TABLE, TEST_FAMILY, null,
Permission.Action.READ,
Permission.Action.WRITE);
grantOnTable(TEST_UTIL, USER_RW.getShortName(), TEST_TABLE, TEST_FAMILY, null,
Permission.Action.READ, Permission.Action.WRITE);
// USER_CREATE is USER_RW plus CREATE permissions
grantOnTable(TEST_UTIL, USER_CREATE.getShortName(),
TEST_TABLE, null, null,
Permission.Action.CREATE,
Permission.Action.READ,
Permission.Action.WRITE);
grantOnTable(TEST_UTIL, USER_CREATE.getShortName(), TEST_TABLE, null, null,
Permission.Action.CREATE, Permission.Action.READ, Permission.Action.WRITE);
grantOnTable(TEST_UTIL, USER_RO.getShortName(),
TEST_TABLE, TEST_FAMILY, null,
grantOnTable(TEST_UTIL, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null,
Permission.Action.READ);
grantGlobal(TEST_UTIL, toGroupEntry(GROUP_ADMIN), Permission.Action.ADMIN);
@ -174,8 +162,8 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
assertEquals(4, PermissionStorage.getTablePermissions(conf, TEST_TABLE).size());
try {
assertEquals(4, AccessControlClient.getUserPermissions(systemUserConnection,
TEST_TABLE.toString()).size());
assertEquals(4,
AccessControlClient.getUserPermissions(systemUserConnection, TEST_TABLE.toString()).size());
} catch (AssertionError e) {
fail(e.getMessage());
} catch (Throwable e) {
@ -210,14 +198,13 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
coprocessors += "," + currentCoprocessors;
}
conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, coprocessors);
conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
RSGroupBasedLoadBalancer.class.getName());
conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, RSGroupBasedLoadBalancer.class.getName());
}
@Test
public void testGetRSGroupInfo() throws Exception {
AccessTestAction action = () -> {
rsGroupAdminEndpoint.checkPermission("getRSGroupInfo");
rsGroupAdminEndpoint.getGroupAdminService().checkPermission("getRSGroupInfo");
return null;
};
@ -227,7 +214,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
@Test
public void testGetRSGroupInfoOfTable() throws Exception {
AccessTestAction action = () -> {
rsGroupAdminEndpoint.checkPermission("getRSGroupInfoOfTable");
rsGroupAdminEndpoint.getGroupAdminService().checkPermission("getRSGroupInfoOfTable");
return null;
};
@ -237,7 +224,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
@Test
public void testMoveServers() throws Exception {
AccessTestAction action = () -> {
rsGroupAdminEndpoint.checkPermission("moveServers");
rsGroupAdminEndpoint.getGroupAdminService().checkPermission("moveServers");
return null;
};
@ -247,7 +234,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
@Test
public void testMoveTables() throws Exception {
AccessTestAction action = () -> {
rsGroupAdminEndpoint.checkPermission("moveTables");
rsGroupAdminEndpoint.getGroupAdminService().checkPermission("moveTables");
return null;
};
@ -257,7 +244,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
@Test
public void testAddRSGroup() throws Exception {
AccessTestAction action = () -> {
rsGroupAdminEndpoint.checkPermission("addRSGroup");
rsGroupAdminEndpoint.getGroupAdminService().checkPermission("addRSGroup");
return null;
};
@ -267,7 +254,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
@Test
public void testRemoveRSGroup() throws Exception {
AccessTestAction action = () -> {
rsGroupAdminEndpoint.checkPermission("removeRSGroup");
rsGroupAdminEndpoint.getGroupAdminService().checkPermission("removeRSGroup");
return null;
};
@ -277,7 +264,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
@Test
public void testBalanceRSGroup() throws Exception {
AccessTestAction action = () -> {
rsGroupAdminEndpoint.checkPermission("balanceRSGroup");
rsGroupAdminEndpoint.getGroupAdminService().checkPermission("balanceRSGroup");
return null;
};
@ -287,7 +274,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
@Test
public void testListRSGroup() throws Exception {
AccessTestAction action = () -> {
rsGroupAdminEndpoint.checkPermission("listRSGroup");
rsGroupAdminEndpoint.getGroupAdminService().checkPermission("listRSGroup");
return null;
};
@ -297,7 +284,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
@Test
public void testGetRSGroupInfoOfServer() throws Exception {
AccessTestAction action = () -> {
rsGroupAdminEndpoint.checkPermission("getRSGroupInfoOfServer");
rsGroupAdminEndpoint.getGroupAdminService().checkPermission("getRSGroupInfoOfServer");
return null;
};
@ -307,7 +294,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
@Test
public void testMoveServersAndTables() throws Exception {
AccessTestAction action = () -> {
rsGroupAdminEndpoint.checkPermission("moveServersAndTables");
rsGroupAdminEndpoint.getGroupAdminService().checkPermission("moveServersAndTables");
return null;
};
@ -317,7 +304,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
@Test
public void testRemoveServers() throws Exception {
AccessTestAction action = () -> {
rsGroupAdminEndpoint.checkPermission("removeServers");
rsGroupAdminEndpoint.getGroupAdminService().checkPermission("removeServers");
return null;
};
@ -326,7 +313,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
private void validateAdminPermissions(AccessTestAction action) throws Exception {
verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN);
verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO,
USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE);
verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ,
USER_GROUP_WRITE, USER_GROUP_CREATE);
}
}

View File

@ -187,41 +187,6 @@
</dependency>
</dependencies>
<profiles>
<profile>
<id>rsgroup</id>
<activation>
<property>
<name>!skip-rsgroup</name>
</property>
</activation>
<dependencies>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-rsgroup</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>build-helper-maven-plugin</artifactId>
<executions>
<execution>
<id>add-test-source</id>
<goals>
<goal>add-test-source</goal>
</goals>
<configuration>
<sources>
<source>src/test/rsgroup</source>
</sources>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
<!-- Skip the tests in this module -->
<profile>
<id>skipShellTests</id>

pom.xml
View File

@ -1540,7 +1540,6 @@
<procedure.test.jar>hbase-procedure-${project.version}-tests.jar</procedure.test.jar>
<it.test.jar>hbase-it-${project.version}-tests.jar</it.test.jar>
<annotations.test.jar>hbase-annotations-${project.version}-tests.jar</annotations.test.jar>
<rsgroup.test.jar>hbase-rsgroup-${project.version}-tests.jar</rsgroup.test.jar>
<mapreduce.test.jar>hbase-mapreduce-${project.version}-tests.jar</mapreduce.test.jar>
<zookeeper.test.jar>hbase-zookeeper-${project.version}-tests.jar</zookeeper.test.jar>
<shell-executable>bash</shell-executable>
@ -1670,18 +1669,6 @@
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<artifactId>hbase-rsgroup</artifactId>
<groupId>org.apache.hbase</groupId>
<version>${project.version}</version>
</dependency>
<dependency>
<artifactId>hbase-rsgroup</artifactId>
<groupId>org.apache.hbase</groupId>
<version>${project.version}</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<artifactId>hbase-replication</artifactId>
<groupId>org.apache.hbase</groupId>
@ -2315,17 +2302,6 @@
-->
<profiles>
<profile>
<id>rsgroup</id>
<activation>
<property>
<name>!skip-rsgroup</name>
</property>
</activation>
<modules>
<module>hbase-rsgroup</module>
</modules>
</profile>
<profile>
<id>build-with-jdk8</id>
<activation>