HBASE-6721 RegionServer Group based Assignment (Francis Liu)

Enis Soztutar 2016-03-14 18:28:50 -07:00
parent 122e6f5793
commit ca816f0780
62 changed files with 20652 additions and 64 deletions


@@ -18,7 +18,19 @@
package org.apache.hadoop.hbase.protobuf;
-import static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME;
+import com.google.common.collect.ArrayListMultimap;
+import com.google.common.collect.ListMultimap;
+import com.google.common.collect.Lists;
+import com.google.common.net.HostAndPort;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.InvalidProtocolBufferException;
+import com.google.protobuf.Message;
+import com.google.protobuf.Parser;
+import com.google.protobuf.RpcChannel;
+import com.google.protobuf.Service;
+import com.google.protobuf.ServiceException;
+import com.google.protobuf.TextFormat;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
@@ -124,6 +136,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableReques
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
@@ -139,6 +152,7 @@ import org.apache.hadoop.hbase.quotas.QuotaType;
import org.apache.hadoop.hbase.quotas.ThrottleType;
import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
+import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.TablePermission;
@@ -157,18 +171,8 @@ import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.token.Token;
-import com.google.common.collect.ArrayListMultimap;
-import com.google.common.collect.ListMultimap;
-import com.google.common.collect.Lists;
-import com.google.protobuf.ByteString;
-import com.google.protobuf.CodedInputStream;
-import com.google.protobuf.InvalidProtocolBufferException;
-import com.google.protobuf.Message;
-import com.google.protobuf.Parser;
-import com.google.protobuf.RpcChannel;
-import com.google.protobuf.Service;
-import com.google.protobuf.ServiceException;
-import com.google.protobuf.TextFormat;
+import static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier
+    .RegionSpecifierType.REGION_NAME;

/**
 * Protobufs utility.
@@ -3057,7 +3061,7 @@ public final class ProtobufUtil {
   * @param builder current message builder
   * @param in InputStream containing protobuf data
   * @param size known size of protobuf data
   * @throws IOException
   */
  public static void mergeFrom(Message.Builder builder, InputStream in, int size)
      throws IOException {
@@ -3072,7 +3076,7 @@ public final class ProtobufUtil {
   * buffers where the message size is not known
   * @param builder current message builder
   * @param in InputStream containing protobuf data
   * @throws IOException
   */
  public static void mergeFrom(Message.Builder builder, InputStream in)
      throws IOException {
@@ -3086,8 +3090,8 @@ public final class ProtobufUtil {
   * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding
   * buffers when working with ByteStrings
   * @param builder current message builder
   * @param bs ByteString containing the
   * @throws IOException
   */
  public static void mergeFrom(Message.Builder builder, ByteString bs) throws IOException {
    final CodedInputStream codedInput = bs.newCodedInput();
@@ -3101,7 +3105,7 @@ public final class ProtobufUtil {
   * buffers when working with byte arrays
   * @param builder current message builder
   * @param b byte array
   * @throws IOException
   */
  public static void mergeFrom(Message.Builder builder, byte[] b) throws IOException {
    final CodedInputStream codedInput = CodedInputStream.newInstance(b);
@@ -3221,4 +3225,33 @@ public final class ProtobufUtil {
    return new TimeRange(minStamp, maxStamp);
  }
+
+  public static RSGroupInfo toGroupInfo(RSGroupProtos.RSGroupInfo proto) {
+    RSGroupInfo RSGroupInfo = new RSGroupInfo(proto.getName());
+    for (HBaseProtos.ServerName el : proto.getServersList()) {
+      RSGroupInfo.addServer(HostAndPort.fromParts(el.getHostName(), el.getPort()));
+    }
+    for (HBaseProtos.TableName pTableName : proto.getTablesList()) {
+      RSGroupInfo.addTable(ProtobufUtil.toTableName(pTableName));
+    }
+    return RSGroupInfo;
+  }
+
+  public static RSGroupProtos.RSGroupInfo toProtoGroupInfo(RSGroupInfo pojo) {
+    List<HBaseProtos.TableName> tables =
+        new ArrayList<HBaseProtos.TableName>(pojo.getTables().size());
+    for (TableName arg : pojo.getTables()) {
+      tables.add(ProtobufUtil.toProtoTableName(arg));
+    }
+    List<HBaseProtos.ServerName> hostports =
+        new ArrayList<HBaseProtos.ServerName>(pojo.getServers().size());
+    for (HostAndPort el : pojo.getServers()) {
+      hostports.add(HBaseProtos.ServerName.newBuilder()
+          .setHostName(el.getHostText())
+          .setPort(el.getPort())
+          .build());
+    }
+    return RSGroupProtos.RSGroupInfo.newBuilder().setName(pojo.getName())
+        .addAllServers(hostports)
+        .addAllTables(tables).build();
+  }
}
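
As a quick orientation for reviewers, here is a minimal sketch of how the two new converters round-trip a group definition; the group, server, and table names below are illustrative only:

    // Illustrative only: round-trip an RSGroupInfo through its protobuf form.
    RSGroupInfo group = new RSGroupInfo("prod");
    group.addServer(HostAndPort.fromParts("rs1.example.com", 16020));
    group.addTable(TableName.valueOf("t1"));

    RSGroupProtos.RSGroupInfo proto = ProtobufUtil.toProtoGroupInfo(group);
    RSGroupInfo copy = ProtobufUtil.toGroupInfo(proto);
    assert group.equals(copy);  // name, servers and tables all survive the trip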


@@ -18,6 +18,10 @@
 */
package org.apache.hadoop.hbase;
+import com.google.common.net.HostAndPort;
+import com.google.common.net.InetAddresses;
+import com.google.protobuf.InvalidProtocolBufferException;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
@@ -31,9 +35,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
-import com.google.common.net.InetAddresses;
-import com.google.protobuf.InvalidProtocolBufferException;

/**
 * Instance of an HBase ServerName.
 * A server name is used uniquely identifying a server instance in a cluster and is made
@@ -54,7 +55,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ServerName implements Comparable<ServerName>, Serializable {
  private static final long serialVersionUID = 1367463982557264981L;

  /**
@@ -91,6 +92,7 @@ public class ServerName implements Comparable<ServerName>, Serializable {
  private final String hostnameOnly;
  private final int port;
  private final long startcode;
+ private transient HostAndPort hostAndPort;

  /**
   * Cached versioned bytes of this ServerName instance.
@@ -105,7 +107,7 @@ public class ServerName implements Comparable<ServerName>, Serializable {
    this.hostnameOnly = hostname;
    this.port = port;
    this.startcode = startcode;
-   this.servername = getServerName(this.hostnameOnly, port, startcode);
+   this.servername = getServerName(hostname, port, startcode);
  }

  /**
@@ -189,7 +191,8 @@ public class ServerName implements Comparable<ServerName>, Serializable {
   * in compares, etc.
   */
  public String toShortString() {
-   return Addressing.createHostAndPortStr(getHostNameMinusDomain(this.hostnameOnly), this.port);
+   return Addressing.createHostAndPortStr(
+       getHostNameMinusDomain(hostnameOnly), port);
  }
/**
@@ -256,7 +259,14 @@
   * {@link Addressing#createHostAndPortStr(String, int)}
   */
  public String getHostAndPort() {
-   return Addressing.createHostAndPortStr(this.hostnameOnly, this.port);
+   return Addressing.createHostAndPortStr(hostnameOnly, port);
  }
+
+ public HostAndPort getHostPort() {
+   if (hostAndPort == null) {
+     hostAndPort = HostAndPort.fromParts(hostnameOnly, port);
+   }
+   return hostAndPort;
+ }
/**

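The new accessor is a lazily built, cached view of the host and port. A short sketch of its behavior (server name and startcode are illustrative): repeated calls return the same cached HostAndPort, and HostAndPort compares by host and port, which is what the rsgroup code relies on when matching servers.

    ServerName sn = ServerName.valueOf("rs1.example.com", 16020, 1457999999999L);
    HostAndPort hp = sn.getHostPort();  // built on first call, cached afterwards
    assert hp == sn.getHostPort();      // same cached instance on later calls
    assert hp.equals(HostAndPort.fromParts("rs1.example.com", 16020));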

@@ -0,0 +1,187 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import com.google.common.collect.Sets;
import com.google.common.net.HostAndPort;
import java.util.Collection;
import java.util.NavigableSet;
import java.util.Set;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
/**
* Stores the group information of region server groups.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class RSGroupInfo {
public static final String DEFAULT_GROUP = "default";
public static final String NAMESPACEDESC_PROP_GROUP = "hbase.rsgroup.name";
private String name;
private Set<HostAndPort> servers;
private NavigableSet<TableName> tables;
public RSGroupInfo(String name) {
this(name, Sets.<HostAndPort>newHashSet(), Sets.<TableName>newTreeSet());
}
RSGroupInfo(String name,
Set<HostAndPort> servers,
NavigableSet<TableName> tables) {
this.name = name;
this.servers = servers;
this.tables = tables;
}
public RSGroupInfo(RSGroupInfo src) {
name = src.getName();
servers = Sets.newHashSet(src.getServers());
tables = Sets.newTreeSet(src.getTables());
}
/**
* Get group name.
*
* @return group name
*/
public String getName() {
return name;
}
/**
* Adds the server to the group.
*
* @param hostPort the server
*/
public void addServer(HostAndPort hostPort){
servers.add(hostPort);
}
/**
* Adds a group of servers.
*
* @param hostPort the servers
*/
public void addAllServers(Collection<HostAndPort> hostPort){
servers.addAll(hostPort);
}
/**
* @param hostPort hostPort of the server
* @return true, if a server with hostPort is found
*/
public boolean containsServer(HostAndPort hostPort) {
return servers.contains(hostPort);
}
/**
* Get list of servers.
*
* @return set of servers
*/
public Set<HostAndPort> getServers() {
return servers;
}
/**
* Remove a server from this group.
*
   * @param hostPort HostPort of the server to remove
   * @return true if the server was removed from the group, false otherwise
   */
public boolean removeServer(HostAndPort hostPort) {
return servers.remove(hostPort);
}
/**
* Set of tables that are members of this group
* @return set of tables
*/
public NavigableSet<TableName> getTables() {
return tables;
}
public void addTable(TableName table) {
tables.add(table);
}
public void addAllTables(Collection<TableName> arg) {
tables.addAll(arg);
}
public boolean containsTable(TableName table) {
return tables.contains(table);
}
public boolean removeTable(TableName table) {
return tables.remove(table);
}
@Override
public String toString() {
StringBuffer sb = new StringBuffer();
sb.append("Name:");
sb.append(this.name);
sb.append(", ");
sb.append(" Servers:");
sb.append(this.servers);
return sb.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
RSGroupInfo RSGroupInfo = (RSGroupInfo) o;
if (!name.equals(RSGroupInfo.name)) {
return false;
}
if (!servers.equals(RSGroupInfo.servers)) {
return false;
}
if (!tables.equals(RSGroupInfo.tables)) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = servers.hashCode();
result = 31 * result + tables.hashCode();
result = 31 * result + name.hashCode();
return result;
}
}
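
A brief sketch of the value semantics this class provides (all names illustrative): membership is tracked per server and per table, and equals() compares name, servers, and tables together, so the copy constructor yields an equal group.

    RSGroupInfo g = new RSGroupInfo("batch");
    g.addServer(HostAndPort.fromParts("rs2.example.com", 16020));
    g.addTable(TableName.valueOf("logs"));

    assert g.containsServer(HostAndPort.fromParts("rs2.example.com", 16020));
    assert g.containsTable(TableName.valueOf("logs"));
    assert new RSGroupInfo(g).equals(g);  // copy constructor preserves equality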


@@ -207,6 +207,16 @@
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-shell</artifactId>
    </dependency>
+   <dependency>
+     <groupId>org.apache.hbase</groupId>
+     <artifactId>hbase-rsgroup</artifactId>
+   </dependency>
+   <dependency>
+     <groupId>org.apache.hbase</groupId>
+     <artifactId>hbase-rsgroup</artifactId>
+     <type>test-jar</type>
+     <scope>test</scope>
+   </dependency>
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-server</artifactId>


@@ -0,0 +1,99 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.junit.After;
import org.junit.Before;
import org.junit.experimental.categories.Category;
/**
 * Runs all of the unit tests defined in TestRSGroupsBase
 * as an integration test.
 * Requires TestRSGroupsBase.NUM_SLAVES_BASE servers to run.
*/
@Category(IntegrationTests.class)
public class IntegrationTestRSGroup extends TestRSGroupsBase {
//Integration specific
private final static Log LOG = LogFactory.getLog(IntegrationTestRSGroup.class);
private static boolean initialized = false;
@Before
public void beforeMethod() throws Exception {
if(!initialized) {
LOG.info("Setting up IntegrationTestRSGroup");
LOG.info("Initializing cluster with " + NUM_SLAVES_BASE + " servers");
TEST_UTIL = new IntegrationTestingUtility();
((IntegrationTestingUtility)TEST_UTIL).initializeCluster(NUM_SLAVES_BASE);
//set shared configs
admin = TEST_UTIL.getHBaseAdmin();
cluster = TEST_UTIL.getHBaseClusterInterface();
rsGroupAdmin = new VerifyingRSGroupAdminClient(RSGroupAdmin.newClient(TEST_UTIL.getConnection()),
    TEST_UTIL.getConfiguration());
LOG.info("Done initializing cluster");
initialized = true;
//cluster may not be clean
//cleanup when initializing
afterMethod();
}
}
@After
public void afterMethod() throws Exception {
LOG.info("Cleaning up previous test run");
//cleanup previous artifacts
deleteTableIfNecessary();
deleteNamespaceIfNecessary();
deleteGroups();
admin.setBalancerRunning(true, true);
LOG.info("Restoring the cluster");
((IntegrationTestingUtility)TEST_UTIL).restoreCluster();
LOG.info("Done restoring the cluster");
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
LOG.info("Waiting for cleanup to finish "+ rsGroupAdmin.listRSGroups());
//Might be greater since moving servers back to default
//is after starting a server
return rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size()
>= NUM_SLAVES_BASE;
}
});
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
LOG.info("Waiting for regionservers to be registered "+ rsGroupAdmin.listRSGroups());
//Might be greater since moving servers back to default
//is after starting a server
return rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size()
== getNumServers();
}
});
LOG.info("Done cleaning up previous test run");
}
}


@@ -193,6 +193,8 @@
          <include>RegionServerStatus.proto</include>
          <include>RowProcessor.proto</include>
          <include>RPC.proto</include>
+         <include>RSGroup.proto</include>
+         <include>RSGroupAdmin.proto</include>
          <include>SecureBulkLoad.proto</include>
          <include>Tracing.proto</include>
          <include>VisibilityLabels.proto</include>


@@ -0,0 +1,34 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package hbase.pb;
option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "RSGroupProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
import "HBase.proto";
message RSGroupInfo {
required string name = 1;
repeated ServerName servers = 4;
repeated TableName tables = 3;
}
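
For reference, here is what a populated RSGroupInfo looks like in protobuf text format; the values are made up, and the field names for ServerName and TableName come from HBase.proto. Note the field numbers above skip 2, which is legal: numbers only need to be unique within the message, not contiguous.

    name: "prod"
    servers {
      host_name: "rs1.example.com"
      port: 16020
    }
    tables {
      namespace: "default"
      qualifier: "t1"
    }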


@@ -0,0 +1,136 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package hbase.pb;
option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "RSGroupAdminProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
import "HBase.proto";
import "RSGroup.proto";
/** Group level protobufs */
message ListTablesOfRSGroupRequest {
required string r_s_group_name = 1;
}
message ListTablesOfRSGroupResponse {
repeated TableName table_name = 1;
}
message GetRSGroupInfoRequest {
required string r_s_group_name = 1;
}
message GetRSGroupInfoResponse {
optional RSGroupInfo r_s_group_info = 1;
}
message GetRSGroupInfoOfTableRequest {
required TableName table_name = 1;
}
message GetRSGroupInfoOfTableResponse {
optional RSGroupInfo r_s_group_info = 1;
}
message MoveServersRequest {
required string target_group = 1;
repeated ServerName servers = 3;
}
message MoveServersResponse {
}
message MoveTablesRequest {
required string target_group = 1;
repeated TableName table_name = 2;
}
message MoveTablesResponse {
}
message AddRSGroupRequest {
required string r_s_group_name = 1;
}
message AddRSGroupResponse {
}
message RemoveRSGroupRequest {
required string r_s_group_name = 1;
}
message RemoveRSGroupResponse {
}
message BalanceRSGroupRequest {
required string r_s_group_name = 1;
}
message BalanceRSGroupResponse {
required bool balanceRan = 1;
}
message ListRSGroupInfosRequest {
}
message ListRSGroupInfosResponse {
repeated RSGroupInfo r_s_group_info = 1;
}
message GetRSGroupInfoOfServerRequest {
required ServerName server = 2;
}
message GetRSGroupInfoOfServerResponse {
optional RSGroupInfo r_s_group_info = 1;
}
service RSGroupAdminService {
rpc GetRSGroupInfo(GetRSGroupInfoRequest)
returns (GetRSGroupInfoResponse);
rpc GetRSGroupInfoOfTable(GetRSGroupInfoOfTableRequest)
returns (GetRSGroupInfoOfTableResponse);
rpc GetRSGroupInfoOfServer(GetRSGroupInfoOfServerRequest)
returns (GetRSGroupInfoOfServerResponse);
rpc MoveServers(MoveServersRequest)
returns (MoveServersResponse);
rpc MoveTables(MoveTablesRequest)
returns (MoveTablesResponse);
rpc AddRSGroup(AddRSGroupRequest)
returns (AddRSGroupResponse);
rpc RemoveRSGroup(RemoveRSGroupRequest)
returns (RemoveRSGroupResponse);
rpc BalanceRSGroup(BalanceRSGroupRequest)
returns (BalanceRSGroupResponse);
rpc ListRSGroupInfos(ListRSGroupInfosRequest)
returns (ListRSGroupInfosResponse);
}

hbase-rsgroup/pom.xml

@@ -0,0 +1,346 @@
<?xml version="1.0"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<!--
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-->
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase</artifactId>
<groupId>org.apache.hbase</groupId>
<version>2.0.0-SNAPSHOT</version>
<relativePath>..</relativePath>
</parent>
<artifactId>hbase-rsgroup</artifactId>
<name>Apache HBase - RSGroup</name>
<description>Regionserver Groups for HBase</description>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-site-plugin</artifactId>
<configuration>
<skip>true</skip>
</configuration>
</plugin>
<plugin>
<!--Make it so assembly:single does nothing in here-->
<artifactId>maven-assembly-plugin</artifactId>
<version>${maven.assembly.version}</version>
<configuration>
<skipAssembly>true</skipAssembly>
</configuration>
</plugin>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<!-- Always skip the second part executions, since we only run
simple unit tests in this module -->
<executions>
<execution>
<id>secondPartTestsExecution</id>
<phase>test</phase>
<goals>
<goal>test</goal>
</goals>
<configuration>
<skip>true</skip>
</configuration>
</execution>
</executions>
</plugin>
<!-- Make a jar and put the sources in the jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
</plugin>
</plugins>
<pluginManagement>
<plugins>
<!--This plugin's configuration is used to store Eclipse m2e settings
only. It has no influence on the Maven build itself.-->
<plugin>
<groupId>org.eclipse.m2e</groupId>
<artifactId>lifecycle-mapping</artifactId>
<version>1.0.0</version>
<configuration>
<lifecycleMappingMetadata>
<pluginExecutions>
<pluginExecution>
<pluginExecutionFilter>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<versionRange>[3.2,)</versionRange>
<goals>
<goal>compile</goal>
</goals>
</pluginExecutionFilter>
<action>
<ignore></ignore>
</action>
</pluginExecution>
</pluginExecutions>
</lifecycleMappingMetadata>
</configuration>
</plugin>
</plugins>
</pluginManagement>
</build>
<dependencies>
<!-- Intra-project dependencies -->
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-annotations</artifactId>
<exclusions>
<exclusion>
<groupId>jdk.tools</groupId>
<artifactId>jdk.tools</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-annotations</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>
<type>test-jar</type>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-testing-util</artifactId>
<scope>test</scope>
</dependency>
<!-- General dependencies -->
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
<groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</dependency>
<dependency>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-all</artifactId>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</dependency>
<dependency>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
</dependency>
<dependency>
<groupId>org.jruby.jcodings</groupId>
<artifactId>jcodings</artifactId>
</dependency>
<dependency>
<groupId>org.jruby.joni</groupId>
<artifactId>joni</artifactId>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-core</artifactId>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-all</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<profiles>
<!-- Skip the tests in this module -->
<profile>
<id>skipRSGroupTests</id>
<activation>
<property>
<name>skipRSGroupTests</name>
</property>
</activation>
<properties>
<surefire.skipFirstPart>true</surefire.skipFirstPart>
</properties>
</profile>
<!-- profile against Hadoop 1.1.x. It has to have the same activation
property as the parent Hadoop 1.1.x profile to make sure it gets run
at the same time. Activate using: mvn -Dhadoop.profile=1.1 -->
<profile>
<id>hadoop-1.1</id>
<activation>
<property>
<!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
<!--h1--><name>hadoop.profile</name><value>1.1</value>
</property>
</activation>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core</artifactId>
</dependency>
</dependencies>
</profile>
<!--
profile for building against Hadoop 2.x. This is the default profile;
it is active whenever hadoop.profile is not set.
-->
<profile>
<id>hadoop-2.0</id>
<activation>
<property>
<!--Below formatting for dev-support/generate-hadoopX-poms.sh-->
<!--h2--><name>!hadoop.profile</name>
</property>
</activation>
<dependencies>
<dependency>
<groupId>com.google.code.findbugs</groupId>
<artifactId>jsr305</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>com.github.stephenc.findbugs</groupId>
<artifactId>findbugs-annotations</artifactId>
</exclusion>
<exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-compiler</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-runtime</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
</profile>
<!--
profile for building against Hadoop 3.0.x. Activate using:
mvn -Dhadoop.profile=3.0
-->
<profile>
<id>hadoop-3.0</id>
<activation>
<property>
<name>hadoop.profile</name>
<value>3.0</value>
</property>
</activation>
<properties>
<hadoop.version>3.0-SNAPSHOT</hadoop.version>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
</dependency>
</dependencies>
</profile>
</profiles>
</project>


@@ -0,0 +1,121 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import com.google.common.net.HostAndPort;
import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
/**
* Group user API interface used between client and server.
*/
@InterfaceAudience.Private
public abstract class RSGroupAdmin implements Closeable {
/**
* Create a new RSGroupAdmin client
* @param conn connection RSGroupAdmin instance will use
* @return a new RSGroupAdmin client
* @throws IOException on failure to create new client
*/
public static RSGroupAdmin newClient(Connection conn) throws IOException {
return new RSGroupAdminClient(conn);
}
/**
* Gets the regionserver group information.
*
* @param groupName the group name
* @return An instance of RSGroupInfo
*/
public abstract RSGroupInfo getRSGroupInfo(String groupName) throws IOException;
/**
* Gets the regionserver group info of table.
*
* @param tableName the table name
* @return An instance of RSGroupInfo.
*/
public abstract RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException;
/**
 * Move a set of servers to another group.
 *
 * @param servers set of servers, must be in the form HOST:PORT
 * @param targetGroup the target group
 * @throws java.io.IOException Signals that an I/O exception has occurred.
 */
public abstract void moveServers(Set<HostAndPort> servers, String targetGroup) throws IOException;
/**
* Move tables to a new group.
* This will unassign all of a table's regions so they can be reassigned to the correct group.
* @param tables list of tables to move
* @param targetGroup target group
* @throws java.io.IOException on failure to move tables
*/
public abstract void moveTables(Set<TableName> tables, String targetGroup) throws IOException;
/**
* Add a new group
* @param name name of the group
* @throws java.io.IOException on failure to add group
*/
public abstract void addRSGroup(String name) throws IOException;
/**
* Remove a regionserver group
* @param name name of the group
* @throws java.io.IOException on failure to remove group
*/
public abstract void removeRSGroup(String name) throws IOException;
/**
* Balance the regions in a group
*
* @param name the name of the group to balance
* @return boolean whether balance ran or not
* @throws java.io.IOException on unexpected failure to balance group
*/
public abstract boolean balanceRSGroup(String name) throws IOException;
/**
* Lists the existing groups.
*
* @return Collection of RSGroupInfo.
*/
public abstract List<RSGroupInfo> listRSGroups() throws IOException;
/**
* Retrieve the RSGroupInfo a server is affiliated to
* @param hostPort HostPort to get RSGroupInfo for
* @return RSGroupInfo associated with the server
* @throws java.io.IOException on unexpected failure to retrieve GroupInfo
*/
public abstract RSGroupInfo getRSGroupOfServer(HostAndPort hostPort) throws IOException;
}
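
Taken together, a hedged sketch of how a client drives this API; the Configuration handling and all group, server, and table names here are illustrative only:

    // Illustrative usage; conf is an ordinary HBase Configuration.
    Connection conn = ConnectionFactory.createConnection(conf);
    RSGroupAdmin groupAdmin = RSGroupAdmin.newClient(conn);

    groupAdmin.addRSGroup("prod");                  // create an empty group
    groupAdmin.moveServers(
        Sets.newHashSet(HostAndPort.fromParts("rs1.example.com", 16020)),
        "prod");                                    // give it a server
    groupAdmin.moveTables(
        Sets.newHashSet(TableName.valueOf("t1")),
        "prod");                                    // pin a table to the group
    groupAdmin.balanceRSGroup("prod");              // balance regions within it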


@@ -0,0 +1,204 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import com.google.common.collect.Sets;
import com.google.common.net.HostAndPort;
import com.google.protobuf.ServiceException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
/**
* Client used for managing region server group information.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
class RSGroupAdminClient extends RSGroupAdmin {
private RSGroupAdminProtos.RSGroupAdminService.BlockingInterface proxy;
private static final Log LOG = LogFactory.getLog(RSGroupAdminClient.class);
public RSGroupAdminClient(Connection conn) throws IOException {
proxy = RSGroupAdminProtos.RSGroupAdminService.newBlockingStub(
conn.getAdmin().coprocessorService());
}
@Override
public RSGroupInfo getRSGroupInfo(String groupName) throws IOException {
try {
RSGroupAdminProtos.GetRSGroupInfoResponse resp =
proxy.getRSGroupInfo(null,
RSGroupAdminProtos.GetRSGroupInfoRequest.newBuilder()
.setRSGroupName(groupName).build());
if(resp.hasRSGroupInfo()) {
return ProtobufUtil.toGroupInfo(resp.getRSGroupInfo());
}
return null;
} catch (ServiceException e) {
throw ProtobufUtil.getRemoteException(e);
}
}
@Override
public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException {
RSGroupAdminProtos.GetRSGroupInfoOfTableRequest request =
RSGroupAdminProtos.GetRSGroupInfoOfTableRequest.newBuilder()
.setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
try {
GetRSGroupInfoOfTableResponse resp = proxy.getRSGroupInfoOfTable(null, request);
if (resp.hasRSGroupInfo()) {
return ProtobufUtil.toGroupInfo(resp.getRSGroupInfo());
}
return null;
} catch (ServiceException e) {
throw ProtobufUtil.getRemoteException(e);
}
}
@Override
public void moveServers(Set<HostAndPort> servers, String targetGroup) throws IOException {
Set<HBaseProtos.ServerName> hostPorts = Sets.newHashSet();
for(HostAndPort el: servers) {
hostPorts.add(HBaseProtos.ServerName.newBuilder()
.setHostName(el.getHostText())
.setPort(el.getPort())
.build());
}
RSGroupAdminProtos.MoveServersRequest request =
RSGroupAdminProtos.MoveServersRequest.newBuilder()
.setTargetGroup(targetGroup)
.addAllServers(hostPorts).build();
try {
proxy.moveServers(null, request);
} catch (ServiceException e) {
throw ProtobufUtil.getRemoteException(e);
}
}
@Override
public void moveTables(Set<TableName> tables, String targetGroup) throws IOException {
RSGroupAdminProtos.MoveTablesRequest.Builder builder =
RSGroupAdminProtos.MoveTablesRequest.newBuilder()
.setTargetGroup(targetGroup);
for(TableName tableName: tables) {
builder.addTableName(ProtobufUtil.toProtoTableName(tableName));
}
try {
proxy.moveTables(null, builder.build());
} catch (ServiceException e) {
throw ProtobufUtil.getRemoteException(e);
}
}
@Override
public void addRSGroup(String groupName) throws IOException {
RSGroupAdminProtos.AddRSGroupRequest request =
RSGroupAdminProtos.AddRSGroupRequest.newBuilder()
.setRSGroupName(groupName).build();
try {
proxy.addRSGroup(null, request);
} catch (ServiceException e) {
throw ProtobufUtil.getRemoteException(e);
}
}
@Override
public void removeRSGroup(String name) throws IOException {
RSGroupAdminProtos.RemoveRSGroupRequest request =
RSGroupAdminProtos.RemoveRSGroupRequest.newBuilder()
.setRSGroupName(name).build();
try {
proxy.removeRSGroup(null, request);
} catch (ServiceException e) {
throw ProtobufUtil.getRemoteException(e);
}
}
@Override
public boolean balanceRSGroup(String name) throws IOException {
RSGroupAdminProtos.BalanceRSGroupRequest request =
RSGroupAdminProtos.BalanceRSGroupRequest.newBuilder()
.setRSGroupName(name).build();
try {
return proxy.balanceRSGroup(null, request).getBalanceRan();
} catch (ServiceException e) {
throw ProtobufUtil.getRemoteException(e);
}
}
@Override
public List<RSGroupInfo> listRSGroups() throws IOException {
try {
List<RSGroupProtos.RSGroupInfo> resp =
proxy.listRSGroupInfos(null,
RSGroupAdminProtos.ListRSGroupInfosRequest.newBuilder().build()).getRSGroupInfoList();
List<RSGroupInfo> result = new ArrayList<RSGroupInfo>(resp.size());
for(RSGroupProtos.RSGroupInfo entry: resp) {
result.add(ProtobufUtil.toGroupInfo(entry));
}
return result;
} catch (ServiceException e) {
throw ProtobufUtil.getRemoteException(e);
}
}
@Override
public RSGroupInfo getRSGroupOfServer(HostAndPort hostPort) throws IOException {
RSGroupAdminProtos.GetRSGroupInfoOfServerRequest request =
RSGroupAdminProtos.GetRSGroupInfoOfServerRequest.newBuilder()
.setServer(HBaseProtos.ServerName.newBuilder()
.setHostName(hostPort.getHostText())
.setPort(hostPort.getPort())
.build())
.build();
try {
GetRSGroupInfoOfServerResponse resp = proxy.getRSGroupInfoOfServer(null, request);
if (resp.hasRSGroupInfo()) {
return ProtobufUtil.toGroupInfo(resp.getRSGroupInfo());
}
return null;
} catch (ServiceException e) {
throw ProtobufUtil.getRemoteException(e);
}
}
@Override
public void close() throws IOException {
}
}


@@ -0,0 +1,965 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import com.google.common.collect.Sets;
import com.google.common.net.HostAndPort;
import com.google.protobuf.RpcCallback;
import com.google.protobuf.RpcController;
import com.google.protobuf.Service;
import java.io.IOException;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
public class RSGroupAdminEndpoint extends RSGroupAdminService
implements CoprocessorService, Coprocessor, MasterObserver {
private static final Log LOG = LogFactory.getLog(RSGroupAdminEndpoint.class);
private MasterServices master = null;
private static RSGroupInfoManagerImpl groupInfoManager;
private RSGroupAdminServer groupAdminServer;
@Override
public void start(CoprocessorEnvironment env) throws IOException {
MasterCoprocessorEnvironment menv = (MasterCoprocessorEnvironment)env;
master = menv.getMasterServices();
groupInfoManager = new RSGroupInfoManagerImpl(master);
groupAdminServer = new RSGroupAdminServer(master, groupInfoManager);
Class<?> clazz =
    master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null);
if (!RSGroupableBalancer.class.isAssignableFrom(clazz)) {
throw new IOException("Configured balancer is not a GroupableBalancer");
}
}
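
For context, this check implies a master-side configuration along the following lines. The hbase-site.xml sketch below assumes the groupable balancer implementation that ships with this feature (RSGroupBasedLoadBalancer); treat it as illustrative, not authoritative:

    <!-- Illustrative hbase-site.xml wiring for the rsgroup feature. -->
    <property>
      <name>hbase.coprocessor.master.classes</name>
      <value>org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint</value>
    </property>
    <property>
      <name>hbase.master.loadbalancer.class</name>
      <value>org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer</value>
    </property>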
@Override
public void stop(CoprocessorEnvironment env) throws IOException {
}
@Override
public Service getService() {
return this;
}
public RSGroupInfoManager getGroupInfoManager() {
return groupInfoManager;
}
@Override
public void getRSGroupInfo(RpcController controller,
GetRSGroupInfoRequest request,
RpcCallback<GetRSGroupInfoResponse> done) {
GetRSGroupInfoResponse response = null;
try {
GetRSGroupInfoResponse.Builder builder =
GetRSGroupInfoResponse.newBuilder();
RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupInfo(request.getRSGroupName());
if(RSGroupInfo != null) {
builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(RSGroupInfo));
}
response = builder.build();
} catch (IOException e) {
ResponseConverter.setControllerException(controller, e);
}
done.run(response);
}
@Override
public void getRSGroupInfoOfTable(RpcController controller,
GetRSGroupInfoOfTableRequest request,
RpcCallback<GetRSGroupInfoOfTableResponse> done) {
GetRSGroupInfoOfTableResponse response = null;
try {
GetRSGroupInfoOfTableResponse.Builder builder =
GetRSGroupInfoOfTableResponse.newBuilder();
TableName tableName = ProtobufUtil.toTableName(request.getTableName());
RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupInfoOfTable(tableName);
if (RSGroupInfo == null) {
response = builder.build();
} else {
response = builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(RSGroupInfo)).build();
}
} catch (IOException e) {
ResponseConverter.setControllerException(controller, e);
}
done.run(response);
}
@Override
public void moveServers(RpcController controller,
MoveServersRequest request,
RpcCallback<MoveServersResponse> done) {
RSGroupAdminProtos.MoveServersResponse response = null;
try {
RSGroupAdminProtos.MoveServersResponse.Builder builder =
RSGroupAdminProtos.MoveServersResponse.newBuilder();
Set<HostAndPort> hostPorts = Sets.newHashSet();
for(HBaseProtos.ServerName el: request.getServersList()) {
hostPorts.add(HostAndPort.fromParts(el.getHostName(), el.getPort()));
}
groupAdminServer.moveServers(hostPorts, request.getTargetGroup());
response = builder.build();
} catch (IOException e) {
ResponseConverter.setControllerException(controller, e);
}
done.run(response);
}
@Override
public void moveTables(RpcController controller,
MoveTablesRequest request,
RpcCallback<MoveTablesResponse> done) {
MoveTablesResponse response = null;
try {
MoveTablesResponse.Builder builder =
MoveTablesResponse.newBuilder();
Set<TableName> tables = new HashSet<TableName>(request.getTableNameList().size());
for(HBaseProtos.TableName tableName: request.getTableNameList()) {
tables.add(ProtobufUtil.toTableName(tableName));
}
groupAdminServer.moveTables(tables, request.getTargetGroup());
response = builder.build();
} catch (IOException e) {
ResponseConverter.setControllerException(controller, e);
}
done.run(response);
}
@Override
public void addRSGroup(RpcController controller,
AddRSGroupRequest request,
RpcCallback<AddRSGroupResponse> done) {
AddRSGroupResponse response = null;
try {
AddRSGroupResponse.Builder builder =
AddRSGroupResponse.newBuilder();
groupAdminServer.addRSGroup(request.getRSGroupName());
response = builder.build();
} catch (IOException e) {
ResponseConverter.setControllerException(controller, e);
}
done.run(response);
}
@Override
public void removeRSGroup(RpcController controller,
RemoveRSGroupRequest request,
RpcCallback<RemoveRSGroupResponse> done) {
RemoveRSGroupResponse response = null;
try {
RemoveRSGroupResponse.Builder builder =
RemoveRSGroupResponse.newBuilder();
groupAdminServer.removeRSGroup(request.getRSGroupName());
response = builder.build();
} catch (IOException e) {
ResponseConverter.setControllerException(controller, e);
}
done.run(response);
}
@Override
public void balanceRSGroup(RpcController controller,
BalanceRSGroupRequest request,
RpcCallback<BalanceRSGroupResponse> done) {
BalanceRSGroupResponse.Builder builder = BalanceRSGroupResponse.newBuilder();
try {
builder.setBalanceRan(groupAdminServer.balanceRSGroup(request.getRSGroupName()));
} catch (IOException e) {
ResponseConverter.setControllerException(controller, e);
builder.setBalanceRan(false);
}
done.run(builder.build());
}
@Override
public void listRSGroupInfos(RpcController controller,
ListRSGroupInfosRequest request,
RpcCallback<ListRSGroupInfosResponse> done) {
ListRSGroupInfosResponse response = null;
try {
ListRSGroupInfosResponse.Builder builder =
ListRSGroupInfosResponse.newBuilder();
for(RSGroupInfo RSGroupInfo : groupAdminServer.listRSGroups()) {
builder.addRSGroupInfo(ProtobufUtil.toProtoGroupInfo(RSGroupInfo));
}
response = builder.build();
} catch (IOException e) {
ResponseConverter.setControllerException(controller, e);
}
done.run(response);
}
@Override
public void getRSGroupInfoOfServer(RpcController controller,
GetRSGroupInfoOfServerRequest request,
RpcCallback<GetRSGroupInfoOfServerResponse> done) {
GetRSGroupInfoOfServerResponse.Builder builder = GetRSGroupInfoOfServerResponse.newBuilder();
try {
HostAndPort hp =
HostAndPort.fromParts(request.getServer().getHostName(), request.getServer().getPort());
RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupOfServer(hp);
if (RSGroupInfo != null) {
builder.setRSGroupInfo(ProtobufUtil.toProtoGroupInfo(RSGroupInfo));
}
} catch (IOException e) {
ResponseConverter.setControllerException(controller, e);
}
done.run(builder.build());
}
@Override
public void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
HTableDescriptor desc, HRegionInfo[] regions) throws IOException {
groupAdminServer.prepareRSGroupForTable(desc);
}
@Override
public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) throws IOException {
groupAdminServer.cleanupRSGroupForTable(tableName);
}
@Override
public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns) throws IOException {
String group = ns.getConfigurationValue(RSGroupInfo.NAMESPACEDESC_PROP_GROUP);
if(group != null && groupAdminServer.getRSGroupInfo(group) == null) {
throw new ConstraintException("Region server group " + group + " does not exist");
}
}
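
A small sketch of the client-side call this hook validates (namespace and group names are illustrative, and admin is an ordinary Admin instance): creating a namespace tagged with a group that has not been added first is rejected here.

    NamespaceDescriptor ns = NamespaceDescriptor.create("prod_ns")
        .addConfiguration(RSGroupInfo.NAMESPACEDESC_PROP_GROUP, "prod")
        .build();
    admin.createNamespace(ns);  // fails unless the "prod" rsgroup already exists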
@Override
public void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns) throws IOException {
preCreateNamespace(ctx, ns);
}
@Override
public void postCreateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
HTableDescriptor desc,
HRegionInfo[] regions) throws IOException {
}
@Override
public void preCreateTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
HTableDescriptor desc,
HRegionInfo[] regions) throws IOException {
}
@Override
public void postCreateTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
HTableDescriptor desc,
HRegionInfo[] regions) throws IOException {
}
@Override
public void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) throws IOException {
}
@Override
public void preDeleteTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) throws IOException {
}
@Override
public void postDeleteTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) throws IOException {
}
@Override
public void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) throws IOException {
}
@Override
public void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) throws IOException {
}
@Override
public void preTruncateTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) throws IOException {
}
@Override
public void postTruncateTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) throws IOException {
}
@Override
public void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HTableDescriptor htd) throws IOException {
}
@Override
public void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HTableDescriptor htd) throws IOException {
}
@Override
public void preModifyTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HTableDescriptor htd) throws IOException {
}
@Override
public void postModifyTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HTableDescriptor htd) throws IOException {
}
@Override
public void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor columnFamily) throws IOException {
}
@Override
public void preAddColumnFamily(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor columnFamily) throws IOException {
}
@Override
public void postAddColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor columnFamily) throws IOException {
}
@Override
public void postAddColumnFamily(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor columnFamily) throws IOException {
}
@Override
public void preAddColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor columnFamily) throws IOException {
}
@Override
public void preAddColumnFamilyHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor columnFamily) throws IOException {
}
@Override
public void postAddColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor columnFamily) throws IOException {
}
@Override
public void postAddColumnFamilyHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor columnFamily) throws IOException {
}
@Override
public void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor columnFamily) throws IOException {
}
@Override
public void preModifyColumnFamily(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor columnFamily) throws IOException {
}
@Override
public void postModifyColumn(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName,
HColumnDescriptor columnFamily) throws IOException {
}
@Override
public void postModifyColumnFamily(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName, HColumnDescriptor columnFamily) throws IOException {
}
@Override
public void preModifyColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName, HColumnDescriptor columnFamily) throws IOException {
}
@Override
public void preModifyColumnFamilyHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName, HColumnDescriptor columnFamily)
throws IOException {
}
@Override
public void postModifyColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName, HColumnDescriptor columnFamily) throws
IOException {
}
@Override
public void postModifyColumnFamilyHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName, HColumnDescriptor columnFamily)
throws IOException {
}
@Override
public void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName, byte[] columnFamily) throws IOException {
}
@Override
public void preDeleteColumnFamily(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName, byte[] columnFamily) throws IOException {
}
@Override
public void postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName, byte[] columnFamily) throws IOException {
}
@Override
public void postDeleteColumnFamily(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName, byte[] columnFamily) throws IOException {
}
@Override
public void preDeleteColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName, byte[] columnFamily) throws IOException {
}
@Override
public void preDeleteColumnFamilyHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName, byte[] columnFamily) throws
IOException {
}
@Override
public void postDeleteColumnHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName, byte[] columnFamily) throws IOException {
}
@Override
public void postDeleteColumnFamilyHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName, byte[] columnFamily) throws
IOException {
}
@Override
public void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName) throws IOException {
}
@Override
public void postEnableTable(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName) throws IOException {
}
@Override
public void preEnableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName) throws IOException {
}
@Override
public void postEnableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName) throws IOException {
}
@Override
public void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName) throws IOException {
}
@Override
public void postDisableTable(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName) throws IOException {
}
@Override
public void preDisableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName) throws IOException {
}
@Override
public void postDisableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
TableName tableName) throws IOException {
}
@Override
public void preMove(ObserverContext<MasterCoprocessorEnvironment> ctx, HRegionInfo region,
ServerName srcServer, ServerName destServer) throws IOException {
}
@Override
public void postMove(ObserverContext<MasterCoprocessorEnvironment> ctx, HRegionInfo region,
ServerName srcServer, ServerName destServer) throws IOException {
}
@Override
public void preAssign(ObserverContext<MasterCoprocessorEnvironment> ctx, HRegionInfo
regionInfo) throws IOException {
}
@Override
public void postAssign(ObserverContext<MasterCoprocessorEnvironment> ctx, HRegionInfo
regionInfo) throws IOException {
}
@Override
public void preUnassign(ObserverContext<MasterCoprocessorEnvironment> ctx, HRegionInfo
regionInfo, boolean force) throws IOException {
}
@Override
public void postUnassign(ObserverContext<MasterCoprocessorEnvironment> ctx, HRegionInfo
regionInfo, boolean force) throws IOException {
}
@Override
public void preRegionOffline(ObserverContext<MasterCoprocessorEnvironment> ctx, HRegionInfo
regionInfo) throws IOException {
}
@Override
public void postRegionOffline(ObserverContext<MasterCoprocessorEnvironment> ctx, HRegionInfo
regionInfo) throws IOException {
}
@Override
public void preBalance(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
}
@Override
public void postBalance(ObserverContext<MasterCoprocessorEnvironment> ctx, List<RegionPlan>
plans) throws IOException {
}
@Override
public boolean preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> ctx, boolean
newValue) throws IOException {
return newValue;
}
@Override
public void postBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> ctx, boolean
oldValue, boolean newValue) throws IOException {
}
@Override
public void preShutdown(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
}
@Override
public void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
}
@Override
public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws
IOException {
}
@Override
public void preMasterInitialization(ObserverContext<MasterCoprocessorEnvironment> ctx) throws
IOException {
}
@Override
public void preSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx, SnapshotDescription
snapshot, HTableDescriptor hTableDescriptor) throws IOException {
}
@Override
public void postSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx, SnapshotDescription
snapshot, HTableDescriptor hTableDescriptor) throws IOException {
}
@Override
public void preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
SnapshotDescription snapshot) throws IOException {
}
@Override
public void postListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
SnapshotDescription snapshot) throws IOException {
}
@Override
public void preCloneSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
SnapshotDescription snapshot, HTableDescriptor hTableDescriptor)
throws IOException {
}
@Override
public void postCloneSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
SnapshotDescription snapshot, HTableDescriptor hTableDescriptor)
throws IOException {
}
@Override
public void preRestoreSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
SnapshotDescription snapshot, HTableDescriptor hTableDescriptor)
throws IOException {
}
@Override
public void postRestoreSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
SnapshotDescription snapshot, HTableDescriptor
hTableDescriptor) throws IOException {
}
@Override
public void preDeleteSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
SnapshotDescription snapshot) throws IOException {
}
@Override
public void postDeleteSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
SnapshotDescription snapshot) throws IOException {
}
@Override
public void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor>
descriptors, String regex) throws IOException {
}
@Override
public void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor>
descriptors, String regex) throws IOException {
}
@Override
public void preGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<HTableDescriptor> descriptors, String regex) throws
IOException {
}
@Override
public void postGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<HTableDescriptor> descriptors, String regex) throws
IOException {
}
@Override
public void postCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns) throws IOException {
}
@Override
public void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String
namespace) throws IOException {
}
@Override
public void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String
namespace) throws IOException {
}
@Override
public void postModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns) throws IOException {
}
@Override
public void preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx, String
namespace) throws IOException {
}
@Override
public void postGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns) throws IOException {
}
@Override
public void preListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<NamespaceDescriptor> descriptors) throws
IOException {
}
@Override
public void postListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<NamespaceDescriptor> descriptors) throws
IOException {
}
@Override
public void preTableFlush(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName) throws IOException {
}
@Override
public void postTableFlush(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName) throws IOException {
}
@Override
public void preSetUserQuota(ObserverContext<MasterCoprocessorEnvironment> ctx, String userName,
Quotas quotas) throws IOException {
}
@Override
public void postSetUserQuota(ObserverContext<MasterCoprocessorEnvironment> ctx, String
userName, Quotas quotas) throws IOException {
}
@Override
public void preSetUserQuota(ObserverContext<MasterCoprocessorEnvironment> ctx, String userName,
TableName tableName, Quotas quotas) throws IOException {
}
@Override
public void postSetUserQuota(ObserverContext<MasterCoprocessorEnvironment> ctx, String
userName, TableName tableName, Quotas quotas) throws IOException {
}
@Override
public void preSetUserQuota(ObserverContext<MasterCoprocessorEnvironment> ctx, String userName,
String namespace, Quotas quotas) throws IOException {
}
@Override
public void postSetUserQuota(ObserverContext<MasterCoprocessorEnvironment> ctx, String
userName, String namespace, Quotas quotas) throws IOException {
}
@Override
public void preSetTableQuota(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName, Quotas quotas) throws IOException {
}
@Override
public void postSetTableQuota(ObserverContext<MasterCoprocessorEnvironment> ctx, TableName
tableName, Quotas quotas) throws IOException {
}
@Override
public void preSetNamespaceQuota(ObserverContext<MasterCoprocessorEnvironment> ctx, String
namespace, Quotas quotas) throws IOException {
}
@Override
public void postSetNamespaceQuota(ObserverContext<MasterCoprocessorEnvironment> ctx, String
namespace, Quotas quotas) throws IOException {
}
@Override
public void preDispatchMerge(ObserverContext<MasterCoprocessorEnvironment> ctx, HRegionInfo
regionA, HRegionInfo regionB) throws IOException {
}
@Override
public void postDispatchMerge(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo
regionA, HRegionInfo regionB) throws IOException {
}
@Override
public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx, Set<HostAndPort>
servers, String targetGroup) throws IOException {
}
@Override
public void postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx, Set<HostAndPort>
servers, String targetGroup) throws IOException {
}
@Override
public void preMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx, Set<TableName>
tables, String targetGroup) throws IOException {
}
@Override
public void postMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<TableName> tables, String targetGroup) throws IOException {
}
@Override
public void preAddRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
throws IOException {
}
@Override
public void postAddRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
throws IOException {
}
@Override
public void preRemoveRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
throws IOException {
}
@Override
public void postRemoveRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
throws IOException {
}
@Override
public void preBalanceRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String groupName)
throws IOException {
}
@Override
public void postBalanceRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
String groupName, boolean balancerRan) throws IOException {
}
@Override
public void preAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx,
ProcedureExecutor<MasterProcedureEnv> procEnv, long procId) throws IOException {
}
@Override
public void postAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx)
throws IOException {
}
@Override
public void preListProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx)
throws IOException {
}
@Override
public void postListProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ProcedureInfo> procInfoList) throws IOException {
}
}
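
The namespace hooks above (preCreateNamespace/preModifyNamespace) reject any namespace whose region server group property names a nonexistent group. A minimal client-side sketch of binding a namespace to a group, assuming a reachable cluster and an already-created group; the "ns_in_group" and "group_a" names are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;

public class NamespaceGroupExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Tables later created in this namespace are placed in "group_a";
      // the preCreateNamespace hook above verifies the group exists.
      admin.createNamespace(NamespaceDescriptor.create("ns_in_group")
          .addConfiguration(RSGroupInfo.NAMESPACEDESC_PROP_GROUP, "group_a")
          .build());
    }
  }
}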

View File

@ -0,0 +1,501 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.net.HostAndPort;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
/**
* Service to support Region Server Grouping (HBase-6721)
*/
@InterfaceAudience.Private
public class RSGroupAdminServer extends RSGroupAdmin {
private static final Log LOG = LogFactory.getLog(RSGroupAdminServer.class);
private MasterServices master;
//List of servers that are being moved from one group to another
//Key=host:port,Value=targetGroup
private ConcurrentMap<HostAndPort,String> serversInTransition =
new ConcurrentHashMap<HostAndPort, String>();
private RSGroupInfoManager RSGroupInfoManager;
public RSGroupAdminServer(MasterServices master,
RSGroupInfoManager RSGroupInfoManager) throws IOException {
this.master = master;
this.RSGroupInfoManager = RSGroupInfoManager;
}
@Override
public RSGroupInfo getRSGroupInfo(String groupName) throws IOException {
return getRSGroupInfoManager().getRSGroup(groupName);
}
@Override
public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException {
String groupName = getRSGroupInfoManager().getRSGroupOfTable(tableName);
if (groupName == null) {
return null;
}
return getRSGroupInfoManager().getRSGroup(groupName);
}
@Override
public void moveServers(Set<HostAndPort> servers, String targetGroupName)
throws IOException {
if (servers == null) {
throw new ConstraintException(
"The list of servers cannot be null.");
}
if (StringUtils.isEmpty(targetGroupName)) {
throw new ConstraintException("The target group cannot be null.");
}
if (servers.size() < 1) {
return;
}
RSGroupInfo targetGrp = getRSGroupInfo(targetGroupName);
if (targetGrp == null) {
throw new ConstraintException("Group does not exist: "+targetGroupName);
}
RSGroupInfoManager manager = getRSGroupInfoManager();
synchronized (manager) {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preMoveServers(servers, targetGroupName);
}
HostAndPort firstServer = servers.iterator().next();
//we only allow a move from a single source group
//so this should be ok
RSGroupInfo srcGrp = manager.getRSGroupOfServer(firstServer);
//only move online servers (from default)
//or servers from other groups
//this prevents bogus servers from entering groups
if (srcGrp == null) {
throw new ConstraintException(
"Server "+firstServer+" does not have a group.");
}
if (RSGroupInfo.DEFAULT_GROUP.equals(srcGrp.getName())) {
Set<HostAndPort> onlineServers = new HashSet<HostAndPort>();
for(ServerName server: master.getServerManager().getOnlineServers().keySet()) {
onlineServers.add(server.getHostPort());
}
for(HostAndPort el: servers) {
if(!onlineServers.contains(el)) {
throw new ConstraintException(
"Server "+el+" is not an online server in default group.");
}
}
}
if(srcGrp.getServers().size() <= servers.size() &&
srcGrp.getTables().size() > 0) {
throw new ConstraintException("Cannot leave a group "+srcGrp.getName()+
" that contains tables " +"without servers.");
}
      String sourceGroupName = srcGrp.getName();
for(HostAndPort server: servers) {
if (serversInTransition.containsKey(server)) {
throw new ConstraintException(
"Server list contains a server that is already being moved: "+server);
}
        RSGroupInfo tmpInfo = getRSGroupInfoManager().getRSGroupOfServer(server);
        if (tmpInfo == null) {
          throw new ConstraintException("Server "+server+" does not have a group.");
        }
        String tmpGroup = tmpInfo.getName();
if (sourceGroupName != null && !tmpGroup.equals(sourceGroupName)) {
throw new ConstraintException(
"Move server request should only come from one source group. "+
"Expecting only "+sourceGroupName+" but contains "+tmpGroup);
}
}
if(sourceGroupName.equals(targetGroupName)) {
throw new ConstraintException(
"Target group is the same as source group: "+targetGroupName);
}
try {
//update the servers as in transition
for (HostAndPort server : servers) {
serversInTransition.put(server, targetGroupName);
}
getRSGroupInfoManager().moveServers(servers, sourceGroupName, targetGroupName);
boolean found;
List<HostAndPort> tmpServers = Lists.newArrayList(servers);
do {
found = false;
for (Iterator<HostAndPort> iter = tmpServers.iterator();
iter.hasNext(); ) {
HostAndPort rs = iter.next();
//get online regions
List<HRegionInfo> regions = new LinkedList<HRegionInfo>();
for (Map.Entry<HRegionInfo, ServerName> el :
master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) {
if (el.getValue().getHostPort().equals(rs)) {
regions.add(el.getKey());
}
}
for (RegionState state :
master.getAssignmentManager().getRegionStates().getRegionsInTransition().values()) {
if (state.getServerName().getHostPort().equals(rs)) {
regions.add(state.getRegion());
}
}
//unassign regions for a server
LOG.info("Unassigning " + regions.size() +
" regions from server " + rs + " for move to " + targetGroupName);
if (regions.size() > 0) {
//TODO bulk unassign or throttled unassign?
for (HRegionInfo region : regions) {
//regions might get assigned from tables of target group
//so we need to filter
if (!targetGrp.containsTable(region.getTable())) {
master.getAssignmentManager().unassign(region);
found = true;
}
}
}
if (!found) {
iter.remove();
}
}
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
LOG.warn("Sleep interrupted", e);
Thread.currentThread().interrupt();
}
} while (found);
} finally {
//remove from transition
for (HostAndPort server : servers) {
serversInTransition.remove(server);
}
}
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postMoveServers(servers, targetGroupName);
}
LOG.info("Move server done: "+sourceGroupName+"->"+targetGroupName);
}
}
@Override
public void moveTables(Set<TableName> tables, String targetGroup) throws IOException {
if (tables == null) {
throw new ConstraintException(
"The list of servers cannot be null.");
}
if(tables.size() < 1) {
LOG.debug("moveTables() passed an empty set. Ignoring.");
return;
}
RSGroupInfoManager manager = getRSGroupInfoManager();
synchronized (manager) {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preMoveTables(tables, targetGroup);
}
if(targetGroup != null) {
RSGroupInfo destGroup = manager.getRSGroup(targetGroup);
if(destGroup == null) {
throw new ConstraintException("Target group does not exist: "+targetGroup);
}
if(destGroup.getServers().size() < 1) {
throw new ConstraintException("Target group must have at least one server.");
}
}
for(TableName table : tables) {
String srcGroup = manager.getRSGroupOfTable(table);
if(srcGroup != null && srcGroup.equals(targetGroup)) {
throw new ConstraintException(
"Source group is the same as target group for table "+table+" :"+srcGroup);
}
}
manager.moveTables(tables, targetGroup);
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postMoveTables(tables, targetGroup);
}
}
for(TableName table: tables) {
TableLock lock = master.getTableLockManager().writeLock(table, "Group: table move");
try {
lock.acquire();
for (HRegionInfo region :
master.getAssignmentManager().getRegionStates().getRegionsOfTable(table)) {
master.getAssignmentManager().unassign(region);
}
} finally {
lock.release();
}
}
}
@Override
public void addRSGroup(String name) throws IOException {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preAddRSGroup(name);
}
getRSGroupInfoManager().addRSGroup(new RSGroupInfo(name));
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postAddRSGroup(name);
}
}
@Override
public void removeRSGroup(String name) throws IOException {
RSGroupInfoManager manager = getRSGroupInfoManager();
synchronized (manager) {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preRemoveRSGroup(name);
}
RSGroupInfo RSGroupInfo = getRSGroupInfoManager().getRSGroup(name);
if(RSGroupInfo == null) {
throw new ConstraintException("Group "+name+" does not exist");
}
int tableCount = RSGroupInfo.getTables().size();
if (tableCount > 0) {
throw new ConstraintException("Group "+name+" must have no associated tables: "+tableCount);
}
int serverCount = RSGroupInfo.getServers().size();
if(serverCount > 0) {
throw new ConstraintException(
"Group "+name+" must have no associated servers: "+serverCount);
}
for(NamespaceDescriptor ns: master.getClusterSchema().getNamespaces()) {
String nsGroup = ns.getConfigurationValue(RSGroupInfo.NAMESPACEDESC_PROP_GROUP);
if(nsGroup != null && nsGroup.equals(name)) {
throw new ConstraintException("Group "+name+" is referenced by namespace: "+ns.getName());
}
}
manager.removeRSGroup(name);
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postRemoveRSGroup(name);
}
}
}
@Override
public boolean balanceRSGroup(String groupName) throws IOException {
ServerManager serverManager = master.getServerManager();
AssignmentManager assignmentManager = master.getAssignmentManager();
LoadBalancer balancer = master.getLoadBalancer();
boolean balancerRan;
synchronized (balancer) {
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().preBalanceRSGroup(groupName);
}
if (getRSGroupInfo(groupName) == null) {
throw new ConstraintException("Group does not exist: "+groupName);
}
      // Only allow one balance run at a time.
Map<String, RegionState> groupRIT = rsGroupGetRegionsInTransition(groupName);
if (groupRIT.size() > 0) {
LOG.debug("Not running balancer because " +
groupRIT.size() +
" region(s) in transition: " +
StringUtils.abbreviate(
master.getAssignmentManager().getRegionStates().getRegionsInTransition().toString(),
256));
return false;
}
if (serverManager.areDeadServersInProgress()) {
LOG.debug("Not running balancer because processing dead regionserver(s): " +
serverManager.getDeadServers());
return false;
}
//We balance per group instead of per table
List<RegionPlan> plans = new ArrayList<RegionPlan>();
for(Map.Entry<TableName, Map<ServerName, List<HRegionInfo>>> tableMap:
getRSGroupAssignmentsByTable(groupName).entrySet()) {
LOG.info("Creating partial plan for table "+tableMap.getKey()+": "+tableMap.getValue());
List<RegionPlan> partialPlans = balancer.balanceCluster(tableMap.getValue());
LOG.info("Partial plan for table "+tableMap.getKey()+": "+partialPlans);
if (partialPlans != null) {
plans.addAll(partialPlans);
}
}
long startTime = System.currentTimeMillis();
balancerRan = plans != null;
if (plans != null && !plans.isEmpty()) {
LOG.info("Group balance "+groupName+" starting with plan count: "+plans.size());
for (RegionPlan plan: plans) {
LOG.info("balance " + plan);
assignmentManager.balance(plan);
}
LOG.info("Group balance "+groupName+" completed after "+
(System.currentTimeMillis()-startTime)+" seconds");
}
if (master.getMasterCoprocessorHost() != null) {
master.getMasterCoprocessorHost().postBalanceRSGroup(groupName, balancerRan);
}
}
return balancerRan;
}
@Override
public List<RSGroupInfo> listRSGroups() throws IOException {
return getRSGroupInfoManager().listRSGroups();
}
@Override
public RSGroupInfo getRSGroupOfServer(HostAndPort hostPort) throws IOException {
return getRSGroupInfoManager().getRSGroupOfServer(hostPort);
}
@InterfaceAudience.Private
public RSGroupInfoManager getRSGroupInfoManager() throws IOException {
return RSGroupInfoManager;
}
private Map<String, RegionState> rsGroupGetRegionsInTransition(String groupName)
throws IOException {
Map<String, RegionState> rit = Maps.newTreeMap();
AssignmentManager am = master.getAssignmentManager();
RSGroupInfo RSGroupInfo = getRSGroupInfo(groupName);
for(TableName tableName : RSGroupInfo.getTables()) {
for(HRegionInfo regionInfo: am.getRegionStates().getRegionsOfTable(tableName)) {
RegionState state =
master.getAssignmentManager().getRegionStates().getRegionTransitionState(regionInfo);
if(state != null) {
rit.put(regionInfo.getEncodedName(), state);
}
}
}
return rit;
}
private Map<TableName, Map<ServerName, List<HRegionInfo>>>
getRSGroupAssignmentsByTable(String groupName) throws IOException {
Map<TableName, Map<ServerName, List<HRegionInfo>>> result = Maps.newHashMap();
RSGroupInfo RSGroupInfo = getRSGroupInfo(groupName);
Map<TableName, Map<ServerName, List<HRegionInfo>>> assignments = Maps.newHashMap();
for(Map.Entry<HRegionInfo, ServerName> entry:
master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) {
TableName currTable = entry.getKey().getTable();
ServerName currServer = entry.getValue();
HRegionInfo currRegion = entry.getKey();
if(RSGroupInfo.getTables().contains(currTable)) {
        if(!assignments.containsKey(currTable)) {
assignments.put(currTable, new HashMap<ServerName, List<HRegionInfo>>());
}
if(!assignments.get(currTable).containsKey(currServer)) {
assignments.get(currTable).put(currServer, new ArrayList<HRegionInfo>());
}
assignments.get(currTable).get(currServer).add(currRegion);
}
}
Map<ServerName, List<HRegionInfo>> serverMap = Maps.newHashMap();
for(ServerName serverName: master.getServerManager().getOnlineServers().keySet()) {
if(RSGroupInfo.getServers().contains(serverName.getHostPort())) {
serverMap.put(serverName, Collections.EMPTY_LIST);
}
}
//add all tables that are members of the group
for(TableName tableName : RSGroupInfo.getTables()) {
if(assignments.containsKey(tableName)) {
result.put(tableName, new HashMap<ServerName, List<HRegionInfo>>());
result.get(tableName).putAll(serverMap);
result.get(tableName).putAll(assignments.get(tableName));
LOG.debug("Adding assignments for "+tableName+": "+assignments.get(tableName));
}
}
return result;
}
public void prepareRSGroupForTable(HTableDescriptor desc) throws IOException {
String groupName =
master.getClusterSchema().getNamespace(desc.getTableName().getNamespaceAsString())
.getConfigurationValue(RSGroupInfo.NAMESPACEDESC_PROP_GROUP);
if (groupName == null) {
groupName = RSGroupInfo.DEFAULT_GROUP;
}
RSGroupInfo RSGroupInfo = getRSGroupInfo(groupName);
if (RSGroupInfo == null) {
throw new ConstraintException("RSGroup " + groupName + " does not exist.");
}
if (!RSGroupInfo.containsTable(desc.getTableName())) {
LOG.debug("Pre-moving table " + desc.getTableName() + " to rsgroup " + groupName);
moveTables(Sets.newHashSet(desc.getTableName()), groupName);
}
}
public void cleanupRSGroupForTable(TableName tableName) throws IOException {
try {
RSGroupInfo group = getRSGroupInfoOfTable(tableName);
if (group != null) {
LOG.debug("Removing deleted table from table rsgroup " + group.getName());
moveTables(Sets.newHashSet(tableName), null);
}
    } catch (IOException ex) {
      //ConstraintException is an IOException, so one catch covers both
      LOG.debug("Failed to perform rsgroup information cleanup for table: " + tableName, ex);
    }
}
@Override
public void close() throws IOException {
}
}
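
A rough end-to-end sketch of the admin operations implemented above. How the RSGroupAdmin handle is obtained is left open here (the client wiring lives in other files of this patch), and the server, table, and group names are made up:

// Assumes the imports of the surrounding file (RSGroupAdmin, Sets, HostAndPort, TableName).
void setUpGroup(RSGroupAdmin groupAdmin) throws IOException {
  groupAdmin.addRSGroup("group_a");                       // create the group
  groupAdmin.moveServers(                                 // pull a server out of its current group
      Sets.newHashSet(HostAndPort.fromParts("rs1.example.com", 16020)), "group_a");
  groupAdmin.moveTables(                                  // pin a table to the group
      Sets.newHashSet(TableName.valueOf("t1")), "group_a");
  groupAdmin.balanceRSGroup("group_a");                   // balance only within the group
}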

View File

@ -0,0 +1,422 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.LinkedListMultimap;
import com.google.common.collect.ListMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.net.HostAndPort;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer;
import org.apache.hadoop.util.ReflectionUtils;
/**
 * GroupBasedLoadBalancer, used when Region Server Grouping is configured (HBase-6721).
 * It balances regions based on a table's group membership.
 *
 * Most assignment methods contain two exclusive code paths: Online - when the group
 * table is online and Offline - when it is unavailable.
 *
 * During Offline, assignments are made based on cached information in zookeeper.
 * If that is unavailable (i.e. bootstrap) then regions are assigned randomly.
*
* Once the GROUP table has been assigned, the balancer switches to Online and will then
* start providing appropriate assignments for user tables.
*
*/
@InterfaceAudience.Private
public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalancer {
/** Config for pluggable load balancers */
public static final String HBASE_GROUP_LOADBALANCER_CLASS = "hbase.group.grouploadbalancer.class";
private static final Log LOG = LogFactory.getLog(RSGroupBasedLoadBalancer.class);
private Configuration config;
private ClusterStatus clusterStatus;
private MasterServices masterServices;
private RSGroupInfoManager RSGroupInfoManager;
private LoadBalancer internalBalancer;
//used during reflection by LoadBalancerFactory
@InterfaceAudience.Private
public RSGroupBasedLoadBalancer() {
}
//This constructor should only be used for unit testing
@InterfaceAudience.Private
public RSGroupBasedLoadBalancer(RSGroupInfoManager RSGroupInfoManager) {
this.RSGroupInfoManager = RSGroupInfoManager;
}
@Override
public Configuration getConf() {
return config;
}
@Override
public void setConf(Configuration conf) {
this.config = conf;
}
@Override
public void setClusterStatus(ClusterStatus st) {
this.clusterStatus = st;
}
@Override
public void setMasterServices(MasterServices masterServices) {
this.masterServices = masterServices;
}
@Override
public List<RegionPlan> balanceCluster(TableName tableName, Map<ServerName, List<HRegionInfo>>
clusterState) throws HBaseIOException {
return balanceCluster(clusterState);
}
@Override
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState)
throws HBaseIOException {
if (!isOnline()) {
throw new ConstraintException(RSGroupInfoManager.RSGROUP_TABLE_NAME +
" is not online, unable to perform balance");
}
Map<ServerName,List<HRegionInfo>> correctedState = correctAssignments(clusterState);
List<RegionPlan> regionPlans = new ArrayList<RegionPlan>();
List<HRegionInfo> misplacedRegions = correctedState.get(LoadBalancer.BOGUS_SERVER_NAME);
for (HRegionInfo regionInfo : misplacedRegions) {
regionPlans.add(new RegionPlan(regionInfo, null, null));
}
try {
for (RSGroupInfo info : RSGroupInfoManager.listRSGroups()) {
Map<ServerName, List<HRegionInfo>> groupClusterState =
new HashMap<ServerName, List<HRegionInfo>>();
for (HostAndPort sName : info.getServers()) {
for(ServerName curr: clusterState.keySet()) {
if(curr.getHostPort().equals(sName)) {
groupClusterState.put(curr, correctedState.get(curr));
}
}
}
List<RegionPlan> groupPlans = this.internalBalancer
.balanceCluster(groupClusterState);
if (groupPlans != null) {
regionPlans.addAll(groupPlans);
}
}
} catch (IOException exp) {
LOG.warn("Exception while balancing cluster.", exp);
regionPlans.clear();
}
return regionPlans;
}
@Override
public Map<ServerName, List<HRegionInfo>> roundRobinAssignment(
List<HRegionInfo> regions, List<ServerName> servers) throws HBaseIOException {
Map<ServerName, List<HRegionInfo>> assignments = Maps.newHashMap();
ListMultimap<String,HRegionInfo> regionMap = ArrayListMultimap.create();
ListMultimap<String,ServerName> serverMap = ArrayListMultimap.create();
generateGroupMaps(regions, servers, regionMap, serverMap);
for(String groupKey : regionMap.keySet()) {
if (regionMap.get(groupKey).size() > 0) {
Map<ServerName, List<HRegionInfo>> result =
this.internalBalancer.roundRobinAssignment(
regionMap.get(groupKey),
serverMap.get(groupKey));
if(result != null) {
assignments.putAll(result);
}
}
}
return assignments;
}
@Override
public Map<ServerName, List<HRegionInfo>> retainAssignment(
Map<HRegionInfo, ServerName> regions, List<ServerName> servers) throws HBaseIOException {
try {
Map<ServerName, List<HRegionInfo>> assignments = new TreeMap<ServerName, List<HRegionInfo>>();
ListMultimap<String, HRegionInfo> groupToRegion = ArrayListMultimap.create();
Set<HRegionInfo> misplacedRegions = getMisplacedRegions(regions);
for (HRegionInfo region : regions.keySet()) {
if (!misplacedRegions.contains(region)) {
String groupName = RSGroupInfoManager.getRSGroupOfTable(region.getTable());
groupToRegion.put(groupName, region);
}
}
// Now the "groupToRegion" map has only the regions which have correct
// assignments.
for (String key : groupToRegion.keySet()) {
Map<HRegionInfo, ServerName> currentAssignmentMap = new TreeMap<HRegionInfo, ServerName>();
List<HRegionInfo> regionList = groupToRegion.get(key);
RSGroupInfo info = RSGroupInfoManager.getRSGroup(key);
List<ServerName> candidateList = filterOfflineServers(info, servers);
for (HRegionInfo region : regionList) {
currentAssignmentMap.put(region, regions.get(region));
}
if(candidateList.size() > 0) {
assignments.putAll(this.internalBalancer.retainAssignment(
currentAssignmentMap, candidateList));
}
}
for (HRegionInfo region : misplacedRegions) {
String groupName = RSGroupInfoManager.getRSGroupOfTable(
region.getTable());
RSGroupInfo info = RSGroupInfoManager.getRSGroup(groupName);
List<ServerName> candidateList = filterOfflineServers(info, servers);
ServerName server = this.internalBalancer.randomAssignment(region,
candidateList);
if (server != null && !assignments.containsKey(server)) {
assignments.put(server, new ArrayList<HRegionInfo>());
} else if (server != null) {
assignments.get(server).add(region);
} else {
//if not server is available assign to bogus so it ends up in RIT
if(!assignments.containsKey(LoadBalancer.BOGUS_SERVER_NAME)) {
assignments.put(LoadBalancer.BOGUS_SERVER_NAME, new ArrayList<HRegionInfo>());
}
assignments.get(LoadBalancer.BOGUS_SERVER_NAME).add(region);
}
}
return assignments;
} catch (IOException e) {
throw new HBaseIOException("Failed to do online retain assignment", e);
}
}
@Override
public ServerName randomAssignment(HRegionInfo region,
List<ServerName> servers) throws HBaseIOException {
ListMultimap<String,HRegionInfo> regionMap = LinkedListMultimap.create();
ListMultimap<String,ServerName> serverMap = LinkedListMultimap.create();
generateGroupMaps(Lists.newArrayList(region), servers, regionMap, serverMap);
List<ServerName> filteredServers = serverMap.get(regionMap.keySet().iterator().next());
return this.internalBalancer.randomAssignment(region, filteredServers);
}
private void generateGroupMaps(
List<HRegionInfo> regions,
List<ServerName> servers,
ListMultimap<String, HRegionInfo> regionMap,
ListMultimap<String, ServerName> serverMap) throws HBaseIOException {
try {
for (HRegionInfo region : regions) {
String groupName = RSGroupInfoManager.getRSGroupOfTable(region.getTable());
if(groupName == null) {
LOG.warn("Group for table "+region.getTable()+" is null");
}
regionMap.put(groupName, region);
}
for (String groupKey : regionMap.keySet()) {
RSGroupInfo info = RSGroupInfoManager.getRSGroup(groupKey);
serverMap.putAll(groupKey, filterOfflineServers(info, servers));
if(serverMap.get(groupKey).size() < 1) {
serverMap.put(groupKey, LoadBalancer.BOGUS_SERVER_NAME);
}
}
} catch(IOException e) {
throw new HBaseIOException("Failed to generate group maps", e);
}
}
private List<ServerName> filterOfflineServers(RSGroupInfo RSGroupInfo,
List<ServerName> onlineServers) {
if (RSGroupInfo != null) {
return filterServers(RSGroupInfo.getServers(), onlineServers);
} else {
LOG.debug("Group Information found to be null. Some regions might be unassigned.");
return Collections.EMPTY_LIST;
}
}
/**
* Filter servers based on the online servers.
*
   * @param servers the servers to filter (host and port pairs)
   * @param onlineServers list of servers which are online
   * @return the intersection of {@code servers} and {@code onlineServers}
*/
private List<ServerName> filterServers(Collection<HostAndPort> servers,
Collection<ServerName> onlineServers) {
ArrayList<ServerName> finalList = new ArrayList<ServerName>();
for (HostAndPort server : servers) {
for(ServerName curr: onlineServers) {
if(curr.getHostPort().equals(server)) {
finalList.add(curr);
}
}
}
return finalList;
}
private ListMultimap<String, HRegionInfo> groupRegions(
List<HRegionInfo> regionList) throws IOException {
ListMultimap<String, HRegionInfo> regionGroup = ArrayListMultimap
.create();
for (HRegionInfo region : regionList) {
String groupName = RSGroupInfoManager.getRSGroupOfTable(region.getTable());
regionGroup.put(groupName, region);
}
return regionGroup;
}
private Set<HRegionInfo> getMisplacedRegions(
Map<HRegionInfo, ServerName> regions) throws IOException {
Set<HRegionInfo> misplacedRegions = new HashSet<HRegionInfo>();
for (HRegionInfo region : regions.keySet()) {
ServerName assignedServer = regions.get(region);
RSGroupInfo info =
RSGroupInfoManager.getRSGroup(RSGroupInfoManager.getRSGroupOfTable(region.getTable()));
if (assignedServer != null &&
(info == null || !info.containsServer(assignedServer.getHostPort()))) {
LOG.debug("Found misplaced region: " + region.getRegionNameAsString() +
" on server: " + assignedServer +
" found in group: " +
RSGroupInfoManager.getRSGroupOfServer(assignedServer.getHostPort()) +
" outside of group: " + info.getName());
misplacedRegions.add(region);
}
}
return misplacedRegions;
}
private Map<ServerName, List<HRegionInfo>> correctAssignments(
Map<ServerName, List<HRegionInfo>> existingAssignments){
Map<ServerName, List<HRegionInfo>> correctAssignments =
new TreeMap<ServerName, List<HRegionInfo>>();
List<HRegionInfo> misplacedRegions = new LinkedList<HRegionInfo>();
correctAssignments.put(LoadBalancer.BOGUS_SERVER_NAME, new LinkedList<HRegionInfo>());
for (ServerName sName : existingAssignments.keySet()) {
correctAssignments.put(sName, new LinkedList<HRegionInfo>());
List<HRegionInfo> regions = existingAssignments.get(sName);
for (HRegionInfo region : regions) {
RSGroupInfo info = null;
try {
info = RSGroupInfoManager.getRSGroup(
RSGroupInfoManager.getRSGroupOfTable(region.getTable()));
}catch(IOException exp){
LOG.debug("Group information null for region of table " + region.getTable(),
exp);
}
        if ((info == null) || (!info.containsServer(sName.getHostPort()))) {
          correctAssignments.get(LoadBalancer.BOGUS_SERVER_NAME).add(region);
          //record it so the unassign pass below actually runs over it
          misplacedRegions.add(region);
        } else {
correctAssignments.get(sName).add(region);
}
}
}
//TODO bulk unassign?
//unassign misplaced regions, so that they are assigned to correct groups.
for(HRegionInfo info: misplacedRegions) {
this.masterServices.getAssignmentManager().unassign(info);
}
return correctAssignments;
}
@Override
public void initialize() throws HBaseIOException {
try {
if (RSGroupInfoManager == null) {
List<RSGroupAdminEndpoint> cps =
masterServices.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class);
if (cps.size() != 1) {
String msg = "Expected one implementation of GroupAdminEndpoint but found " + cps.size();
LOG.error(msg);
throw new HBaseIOException(msg);
}
RSGroupInfoManager = cps.get(0).getGroupInfoManager();
}
} catch (IOException e) {
throw new HBaseIOException("Failed to initialize GroupInfoManagerImpl", e);
}
// Create the balancer
Class<? extends LoadBalancer> balancerKlass = config.getClass(
HBASE_GROUP_LOADBALANCER_CLASS,
StochasticLoadBalancer.class, LoadBalancer.class);
internalBalancer = ReflectionUtils.newInstance(balancerKlass, config);
internalBalancer.setClusterStatus(clusterStatus);
internalBalancer.setMasterServices(masterServices);
internalBalancer.setConf(config);
internalBalancer.initialize();
}
public boolean isOnline() {
return RSGroupInfoManager != null && RSGroupInfoManager.isOnline();
}
@Override
public void regionOnline(HRegionInfo regionInfo, ServerName sn) {
}
@Override
public void regionOffline(HRegionInfo regionInfo) {
}
@Override
public void onConfigurationChange(Configuration conf) {
//DO nothing for now
}
@Override
public void stop(String why) {
}
@Override
public boolean isStopped() {
return false;
}
}
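
A hedged configuration sketch for wiring this balancer into the master, using the standard HBase balancer and master coprocessor keys; picking SimpleLoadBalancer as the per-group internal balancer is only an example, since initialize() above defaults to StochasticLoadBalancer:

// Assumed imports: HBaseConfiguration, HConstants, CoprocessorHost,
// SimpleLoadBalancer, plus the rsgroup classes from this patch.
Configuration conf = HBaseConfiguration.create();
conf.set(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
    RSGroupBasedLoadBalancer.class.getName());
conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
    RSGroupAdminEndpoint.class.getName());
conf.setClass(RSGroupBasedLoadBalancer.HBASE_GROUP_LOADBALANCER_CLASS,
    SimpleLoadBalancer.class, LoadBalancer.class);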

View File

@ -0,0 +1,132 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import com.google.common.net.HostAndPort;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
/**
* Interface used to manage RSGroupInfo storage. An implementation
* has the option to support offline mode.
* See {@link RSGroupBasedLoadBalancer}
*/
public interface RSGroupInfoManager {
//Assigned before user tables
public static final TableName RSGROUP_TABLE_NAME =
TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "rsgroup");
public static final byte[] RSGROUP_TABLE_NAME_BYTES = RSGROUP_TABLE_NAME.toBytes();
public static final String rsGroupZNode = "rsgroup";
public static final byte[] META_FAMILY_BYTES = Bytes.toBytes("m");
public static final byte[] META_QUALIFIER_BYTES = Bytes.toBytes("i");
public static final byte[] ROW_KEY = {0};
/**
* Adds the group.
*
   * @param rsGroupInfo the group information to add
* @throws java.io.IOException Signals that an I/O exception has occurred.
*/
void addRSGroup(RSGroupInfo rsGroupInfo) throws IOException;
/**
* Remove a region server group.
*
* @param groupName the group name
* @throws java.io.IOException Signals that an I/O exception has occurred.
*/
void removeRSGroup(String groupName) throws IOException;
/**
* move servers to a new group.
* @param hostPorts list of servers, must be part of the same group
* @param srcGroup groupName being moved from
* @param dstGroup groupName being moved to
* @return true if move was successful
* @throws java.io.IOException on move failure
*/
boolean moveServers(Set<HostAndPort> hostPorts,
String srcGroup, String dstGroup) throws IOException;
/**
* Gets the group info of server.
*
* @param hostPort the server
* @return An instance of RSGroupInfo
*/
RSGroupInfo getRSGroupOfServer(HostAndPort hostPort) throws IOException;
/**
* Gets the group information.
*
* @param groupName the group name
* @return An instance of RSGroupInfo
*/
RSGroupInfo getRSGroup(String groupName) throws IOException;
/**
* Get the group membership of a table
* @param tableName name of table to get group membership
* @return Group name of table
   * @throws java.io.IOException on failure to retrieve information
*/
String getRSGroupOfTable(TableName tableName) throws IOException;
/**
* Set the group membership of a set of tables
*
* @param tableNames set of tables to move
* @param groupName name of group of tables to move to
* @throws java.io.IOException on failure to move
*/
void moveTables(Set<TableName> tableNames, String groupName) throws IOException;
/**
* List the groups
*
* @return list of RSGroupInfo
* @throws java.io.IOException on failure
*/
List<RSGroupInfo> listRSGroups() throws IOException;
/**
* Refresh/reload the group information from
* the persistent store
*
* @throws java.io.IOException on failure to refresh
*/
void refresh() throws IOException;
/**
* Whether the manager is able to fully
* return group metadata
*
* @return whether the manager is in online mode
*/
boolean isOnline();
}
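
The constants above fix the storage layout closely enough for a quick inspection read. The sketch below assumes one row per group keyed by the group name, with the serialized protobuf under m:i, matching the implementation that follows; the "group_a" key is illustrative, and the supported access path remains the RSGroupAdmin API:

// Assumed imports: org.apache.hadoop.hbase.client.{Connection, ConnectionFactory,
// Get, Result, Table} and the generated RSGroupProtos; the enclosing method is
// assumed to throw IOException.
try (Connection conn = ConnectionFactory.createConnection(conf);
     Table rsgroup = conn.getTable(RSGroupInfoManager.RSGROUP_TABLE_NAME)) {
  Result row = rsgroup.get(new Get(Bytes.toBytes("group_a")));
  byte[] bytes = row.getValue(RSGroupInfoManager.META_FAMILY_BYTES,
      RSGroupInfoManager.META_QUALIFIER_BYTES);
  if (bytes != null) {
    RSGroupProtos.RSGroupInfo pb = RSGroupProtos.RSGroupInfo.parseFrom(bytes);
    System.out.println("rsgroup: " + pb.getName());
  }
}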

View File

@ -0,0 +1,755 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.net.HostAndPort;
import com.google.protobuf.ServiceException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.MetaTableAccessor.DefaultVisitorBase;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.ServerListener;
import org.apache.hadoop.hbase.master.TableStateManager;
import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
import org.apache.hadoop.hbase.security.access.AccessControlLists;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
/**
 * This is an implementation of {@link RSGroupInfoManager} which makes
 * use of an HBase table as the persistence store for the group information.
* It also makes use of zookeeper to store group information needed
* for bootstrapping during offline mode.
*/
public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListener {
private static final Log LOG = LogFactory.getLog(RSGroupInfoManagerImpl.class);
/** Table descriptor for <code>hbase:rsgroup</code> catalog table */
private final static HTableDescriptor RSGROUP_TABLE_DESC;
static {
RSGROUP_TABLE_DESC = new HTableDescriptor(RSGROUP_TABLE_NAME_BYTES);
RSGROUP_TABLE_DESC.addFamily(new HColumnDescriptor(META_FAMILY_BYTES));
RSGROUP_TABLE_DESC.setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName());
try {
RSGROUP_TABLE_DESC.addCoprocessor(
MultiRowMutationEndpoint.class.getName(),
null, Coprocessor.PRIORITY_SYSTEM, null);
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
private volatile Map<String, RSGroupInfo> rsGroupMap;
private volatile Map<TableName, String> tableMap;
private MasterServices master;
private Table rsGroupTable;
private ClusterConnection conn;
private ZooKeeperWatcher watcher;
private RSGroupStartupWorker rsGroupStartupWorker;
// contains list of groups that were last flushed to persistent store
private volatile Set<String> prevRSGroups;
private RSGroupSerDe rsGroupSerDe;
private DefaultServerUpdater defaultServerUpdater;
public RSGroupInfoManagerImpl(MasterServices master) throws IOException {
this.rsGroupMap = Collections.EMPTY_MAP;
this.tableMap = Collections.EMPTY_MAP;
rsGroupSerDe = new RSGroupSerDe();
this.master = master;
this.watcher = master.getZooKeeper();
this.conn = master.getClusterConnection();
rsGroupStartupWorker = new RSGroupStartupWorker(this, master, conn);
prevRSGroups = new HashSet<String>();
refresh();
rsGroupStartupWorker.start();
defaultServerUpdater = new DefaultServerUpdater(this);
master.getServerManager().registerListener(this);
defaultServerUpdater.start();
}
/**
* Adds the group.
*
   * @param rsGroupInfo the group information to add
*/
@Override
public synchronized void addRSGroup(RSGroupInfo rsGroupInfo) throws IOException {
checkGroupName(rsGroupInfo.getName());
    if (rsGroupMap.get(rsGroupInfo.getName()) != null ||
        rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
throw new DoNotRetryIOException("Group already exists: "+ rsGroupInfo.getName());
}
Map<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
newGroupMap.put(rsGroupInfo.getName(), rsGroupInfo);
flushConfig(newGroupMap);
}
@Override
public synchronized boolean moveServers(Set<HostAndPort> hostPorts, String srcGroup,
String dstGroup) throws IOException {
if (!rsGroupMap.containsKey(srcGroup)) {
throw new DoNotRetryIOException("Group "+srcGroup+" does not exist");
}
if (!rsGroupMap.containsKey(dstGroup)) {
throw new DoNotRetryIOException("Group "+dstGroup+" does not exist");
}
RSGroupInfo src = new RSGroupInfo(getRSGroup(srcGroup));
RSGroupInfo dst = new RSGroupInfo(getRSGroup(dstGroup));
boolean foundOne = false;
for(HostAndPort el: hostPorts) {
foundOne = src.removeServer(el) || foundOne;
dst.addServer(el);
}
Map<String,RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
newGroupMap.put(src.getName(), src);
newGroupMap.put(dst.getName(), dst);
flushConfig(newGroupMap);
return foundOne;
}
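// Note on the semantics above: the servers in hostPorts are added to dstGroup
// whether or not they were members of srcGroup; the boolean result only reports
// whether at least one server was actually removed from srcGroup.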
/**
* Gets the group info of a server.
*
* @param hostPort the server
* @return An instance of RSGroupInfo, or null if the server is not in any group
*/
@Override
public RSGroupInfo getRSGroupOfServer(HostAndPort hostPort) throws IOException {
for (RSGroupInfo info : rsGroupMap.values()) {
if (info.containsServer(hostPort)){
return info;
}
}
return null;
}
/**
* Gets the group information.
*
* @param groupName the group name
* @return An instance of RSGroupInfo, or null if no such group exists
*/
@Override
public RSGroupInfo getRSGroup(String groupName) throws IOException {
return rsGroupMap.get(groupName);
}
@Override
public String getRSGroupOfTable(TableName tableName) throws IOException {
return tableMap.get(tableName);
}
@Override
public synchronized void moveTables(
Set<TableName> tableNames, String groupName) throws IOException {
if (groupName != null && !rsGroupMap.containsKey(groupName)) {
throw new DoNotRetryIOException("Group "+groupName+" does not exist or is a special group");
}
Map<String,RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
for(TableName tableName: tableNames) {
if (tableMap.containsKey(tableName)) {
RSGroupInfo src = new RSGroupInfo(rsGroupMap.get(tableMap.get(tableName)));
src.removeTable(tableName);
newGroupMap.put(src.getName(), src);
}
if(groupName != null) {
RSGroupInfo dst = new RSGroupInfo(newGroupMap.get(groupName));
dst.addTable(tableName);
newGroupMap.put(dst.getName(), dst);
}
}
flushConfig(newGroupMap);
}
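// Note on the null contract above: passing a null groupName detaches the tables
// from their current groups without assigning a new one, e.g. (hypothetical table):
//
//   manager.moveTables(Sets.newHashSet(TableName.valueOf("t1")), null);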
/**
* Delete a region server group.
*
* @param groupName the group name
* @throws java.io.IOException Signals that an I/O exception has occurred.
*/
@Override
public synchronized void removeRSGroup(String groupName) throws IOException {
if (!rsGroupMap.containsKey(groupName) || groupName.equals(RSGroupInfo.DEFAULT_GROUP)) {
throw new DoNotRetryIOException("Group "+groupName+" does not exist or is a reserved group");
}
Map<String,RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
newGroupMap.remove(groupName);
flushConfig(newGroupMap);
}
@Override
public List<RSGroupInfo> listRSGroups() throws IOException {
return Lists.newLinkedList(rsGroupMap.values());
}
@Override
public boolean isOnline() {
return rsGroupStartupWorker.isOnline();
}
@Override
public synchronized void refresh() throws IOException {
refresh(false);
}
private synchronized void refresh(boolean forceOnline) throws IOException {
List<RSGroupInfo> groupList = new LinkedList<RSGroupInfo>();
// The group table is the source of truth: when online, read from it and
// overwrite anything read from ZK; otherwise fall back to the ZK snapshot
if (forceOnline || isOnline()) {
LOG.debug("Refreshing in Online mode.");
if (rsGroupTable == null) {
rsGroupTable = conn.getTable(RSGROUP_TABLE_NAME);
}
groupList.addAll(rsGroupSerDe.retrieveGroupList(rsGroupTable));
} else {
LOG.debug("Refershing in Offline mode.");
String groupBasePath = ZKUtil.joinZNode(watcher.baseZNode, rsGroupZNode);
groupList.addAll(rsGroupSerDe.retrieveGroupList(watcher, groupBasePath));
}
// refresh default group, prune
NavigableSet<TableName> orphanTables = new TreeSet<TableName>();
for(String entry: master.getTableDescriptors().getAll().keySet()) {
orphanTables.add(TableName.valueOf(entry));
}
List<TableName> specialTables;
if(!master.isInitialized()) {
specialTables = new ArrayList<TableName>();
specialTables.add(AccessControlLists.ACL_TABLE_NAME);
specialTables.add(TableName.META_TABLE_NAME);
specialTables.add(TableName.NAMESPACE_TABLE_NAME);
specialTables.add(RSGROUP_TABLE_NAME);
} else {
specialTables =
master.listTableNamesByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR);
}
for(TableName table : specialTables) {
orphanTables.add(table);
}
for(RSGroupInfo group: groupList) {
if(!group.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
orphanTables.removeAll(group.getTables());
}
}
// The default group is added to the end of the list
// so it overwrites the default group loaded
// from the group table or ZK
groupList.add(new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP,
Sets.newHashSet(getDefaultServers()),
orphanTables));
// populate the data
HashMap<String, RSGroupInfo> newGroupMap = Maps.newHashMap();
HashMap<TableName, String> newTableMap = Maps.newHashMap();
for (RSGroupInfo group : groupList) {
newGroupMap.put(group.getName(), group);
for(TableName table: group.getTables()) {
newTableMap.put(table, group.getName());
}
}
rsGroupMap = Collections.unmodifiableMap(newGroupMap);
tableMap = Collections.unmodifiableMap(newTableMap);
prevRSGroups.clear();
prevRSGroups.addAll(rsGroupMap.keySet());
}
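// Summary of the precedence implemented above: when online, group state comes from
// the hbase:rsgroup table; when offline, from the ZK snapshot. The default group is
// never taken verbatim from either store: its server list is recomputed from the
// online servers no other group claims, and its table set from the tables no other
// group claims.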
private synchronized Map<TableName,String> flushConfigTable(Map<String,RSGroupInfo> newGroupMap)
throws IOException {
Map<TableName,String> newTableMap = Maps.newHashMap();
List<Mutation> mutations = Lists.newArrayList();
// populate deletes
for(String groupName : prevRSGroups) {
if(!newGroupMap.containsKey(groupName)) {
Delete d = new Delete(Bytes.toBytes(groupName));
mutations.add(d);
}
}
// populate puts
for(RSGroupInfo RSGroupInfo : newGroupMap.values()) {
RSGroupProtos.RSGroupInfo proto = ProtobufUtil.toProtoGroupInfo(RSGroupInfo);
Put p = new Put(Bytes.toBytes(RSGroupInfo.getName()));
p.addColumn(META_FAMILY_BYTES,
META_QUALIFIER_BYTES,
proto.toByteArray());
mutations.add(p);
for(TableName entry: RSGroupInfo.getTables()) {
newTableMap.put(entry, RSGroupInfo.getName());
}
}
if(mutations.size() > 0) {
multiMutate(mutations);
}
return newTableMap;
}
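// Sketch of the persisted layout written above: one row per group, keyed by the
// group name, with the serialized RSGroupProtos.RSGroupInfo in a single cell, i.e.
//
//   row=<group name>, family=META_FAMILY_BYTES, qualifier=META_QUALIFIER_BYTES,
//   value=proto.toByteArray()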
private synchronized void flushConfig(Map<String, RSGroupInfo> newGroupMap) throws IOException {
Map<TableName, String> newTableMap;
// In offline mode persistence is unavailable;
// only the default group's server list may be updated in-memory
if (!isOnline()) {
Map<String, RSGroupInfo> m = Maps.newHashMap(rsGroupMap);
RSGroupInfo oldDefaultGroup = m.remove(RSGroupInfo.DEFAULT_GROUP);
RSGroupInfo newDefaultGroup = newGroupMap.remove(RSGroupInfo.DEFAULT_GROUP);
if (!m.equals(newGroupMap) ||
!oldDefaultGroup.getTables().equals(newDefaultGroup.getTables())) {
throw new IOException("Only default servers can be updated during offline mode");
}
newGroupMap.put(RSGroupInfo.DEFAULT_GROUP, newDefaultGroup);
rsGroupMap = newGroupMap;
return;
}
newTableMap = flushConfigTable(newGroupMap);
// make changes visible now that they have been
// persisted to the source of truth
rsGroupMap = Collections.unmodifiableMap(newGroupMap);
tableMap = Collections.unmodifiableMap(newTableMap);
try {
String groupBasePath = ZKUtil.joinZNode(watcher.baseZNode, rsGroupZNode);
ZKUtil.createAndFailSilent(watcher, groupBasePath, ProtobufMagic.PB_MAGIC);
List<ZKUtil.ZKUtilOp> zkOps = new ArrayList<ZKUtil.ZKUtilOp>(newGroupMap.size());
for(String groupName : prevRSGroups) {
if(!newGroupMap.containsKey(groupName)) {
String znode = ZKUtil.joinZNode(groupBasePath, groupName);
zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode));
}
}
for(RSGroupInfo RSGroupInfo : newGroupMap.values()) {
String znode = ZKUtil.joinZNode(groupBasePath, RSGroupInfo.getName());
RSGroupProtos.RSGroupInfo proto = ProtobufUtil.toProtoGroupInfo(RSGroupInfo);
LOG.debug("Updating znode: "+znode);
ZKUtil.createAndFailSilent(watcher, znode);
zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode));
zkOps.add(ZKUtil.ZKUtilOp.createAndFailSilent(znode,
ProtobufUtil.prependPBMagic(proto.toByteArray())));
}
LOG.debug("Writing ZK GroupInfo count: " + zkOps.size());
ZKUtil.multiOrSequential(watcher, zkOps, false);
} catch (KeeperException e) {
LOG.error("Failed to write to rsGroupZNode", e);
master.abort("Failed to write to rsGroupZNode", e);
throw new IOException("Failed to write to rsGroupZNode",e);
}
prevRSGroups.clear();
prevRSGroups.addAll(newGroupMap.keySet());
}
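// Sketch of the ZK mirror written above: each group gets a child znode under
// <baseZNode>/<rsGroupZNode> whose data is the serialized proto prefixed with the
// standard PB magic; the delete-then-create pair per znode inside the single multi
// call replaces any stale data for groups that still exist.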
private List<ServerName> getOnlineRS() throws IOException {
if (master != null) {
return master.getServerManager().getOnlineServersList();
}
try {
LOG.debug("Reading online RS from zookeeper");
List<ServerName> servers = new LinkedList<ServerName>();
for (String el: ZKUtil.listChildrenNoWatch(watcher, watcher.rsZNode)) {
servers.add(ServerName.parseServerName(el));
}
return servers;
} catch (KeeperException e) {
throw new IOException("Failed to retrieve server list from zookeeper", e);
}
}
private List<HostAndPort> getDefaultServers() throws IOException {
List<HostAndPort> defaultServers = new LinkedList<HostAndPort>();
for(ServerName server : getOnlineRS()) {
HostAndPort hostPort = HostAndPort.fromParts(server.getHostname(), server.getPort());
boolean found = false;
for(RSGroupInfo RSGroupInfo : rsGroupMap.values()) {
if(!RSGroupInfo.DEFAULT_GROUP.equals(RSGroupInfo.getName()) &&
RSGroupInfo.containsServer(hostPort)) {
found = true;
break;
}
}
if(!found) {
defaultServers.add(hostPort);
}
}
return defaultServers;
}
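// In other words, the default group implicitly owns every online server that no
// explicit group claims.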
private synchronized void updateDefaultServers(
Set<HostAndPort> hostPort) throws IOException {
RSGroupInfo info = rsGroupMap.get(RSGroupInfo.DEFAULT_GROUP);
RSGroupInfo newInfo = new RSGroupInfo(info.getName(), hostPort, info.getTables());
HashMap<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
newGroupMap.put(newInfo.getName(), newInfo);
flushConfig(newGroupMap);
}
@Override
public void serverAdded(ServerName serverName) {
defaultServerUpdater.serverChanged();
}
@Override
public void serverRemoved(ServerName serverName) {
defaultServerUpdater.serverChanged();
}
private static class DefaultServerUpdater extends Thread {
private static final Log LOG = LogFactory.getLog(DefaultServerUpdater.class);
private RSGroupInfoManagerImpl mgr;
private boolean hasChanged = false;
public DefaultServerUpdater(RSGroupInfoManagerImpl mgr) {
this.mgr = mgr;
}
@Override
public void run() {
List<HostAndPort> prevDefaultServers = new LinkedList<HostAndPort>();
while(!mgr.master.isAborted() && !mgr.master.isStopped()) {
try {
LOG.info("Updating default servers.");
List<HostAndPort> servers = mgr.getDefaultServers();
Collections.sort(servers, new Comparator<HostAndPort>() {
@Override
public int compare(HostAndPort o1, HostAndPort o2) {
int diff = o1.getHostText().compareTo(o2.getHostText());
if (diff != 0) {
return diff;
}
return o1.getPort() - o2.getPort();
}
});
if(!servers.equals(prevDefaultServers)) {
mgr.updateDefaultServers(Sets.<HostAndPort>newHashSet(servers));
prevDefaultServers = servers;
LOG.info("Updated with servers: "+servers.size());
}
try {
synchronized (this) {
if(!hasChanged) {
wait();
}
hasChanged = false;
}
} catch (InterruptedException e) {
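// interrupted while waiting for a change; fall through and re-check state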
}
} catch (IOException e) {
LOG.warn("Failed to update default servers", e);
}
}
}
public void serverChanged() {
synchronized (this) {
hasChanged = true;
this.notify();
}
}
}
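// The updater above is a guarded-wait consumer: serverChanged() sets hasChanged and
// notifies, while run() recomputes the default group's server list and waits only
// when no further change has been signalled, coalescing bursts of membership events
// into a single update.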
private static class RSGroupStartupWorker extends Thread {
private static final Log LOG = LogFactory.getLog(RSGroupStartupWorker.class);
private Configuration conf;
private volatile boolean isOnline = false;
private MasterServices masterServices;
private RSGroupInfoManagerImpl groupInfoManager;
private ClusterConnection conn;
public RSGroupStartupWorker(RSGroupInfoManagerImpl groupInfoManager,
MasterServices masterServices,
ClusterConnection conn) {
this.conf = masterServices.getConfiguration();
this.masterServices = masterServices;
this.groupInfoManager = groupInfoManager;
this.conn = conn;
setName(RSGroupStartupWorker.class.getName()+"-"+masterServices.getServerName());
setDaemon(true);
}
@Override
public void run() {
if(waitForGroupTableOnline()) {
LOG.info("GroupBasedLoadBalancer is now online");
}
}
public boolean waitForGroupTableOnline() {
final List<HRegionInfo> foundRegions = new LinkedList<HRegionInfo>();
final List<HRegionInfo> assignedRegions = new LinkedList<HRegionInfo>();
final AtomicBoolean found = new AtomicBoolean(false);
final TableStateManager tsm = masterServices.getTableStateManager();
boolean createSent = false;
while (!found.get() && isMasterRunning()) {
foundRegions.clear();
assignedRegions.clear();
found.set(true);
try {
final Table nsTable = conn.getTable(TableName.NAMESPACE_TABLE_NAME);
final Table groupTable = conn.getTable(RSGROUP_TABLE_NAME);
boolean rootMetaFound =
masterServices.getMetaTableLocator().verifyMetaRegionLocation(
conn,
masterServices.getZooKeeper(),
1);
final AtomicBoolean nsFound = new AtomicBoolean(false);
if (rootMetaFound) {
MetaTableAccessor.Visitor visitor = new DefaultVisitorBase() {
@Override
public boolean visitInternal(Result row) throws IOException {
HRegionInfo info = MetaTableAccessor.getHRegionInfo(row);
if (info != null) {
Cell serverCell =
row.getColumnLatestCell(HConstants.CATALOG_FAMILY,
HConstants.SERVER_QUALIFIER);
if (RSGROUP_TABLE_NAME.equals(info.getTable()) && serverCell != null) {
ServerName sn =
ServerName.parseVersionedServerName(CellUtil.cloneValue(serverCell));
if (sn == null) {
found.set(false);
} else if (tsm.isTableState(RSGROUP_TABLE_NAME, TableState.State.ENABLED)) {
try {
ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
ClientProtos.GetRequest request =
RequestConverter.buildGetRequest(info.getRegionName(),
new Get(ROW_KEY));
rs.get(null, request);
assignedRegions.add(info);
} catch(Exception ex) {
LOG.debug("Caught exception while verifying group region", ex);
}
}
foundRegions.add(info);
}
if (TableName.NAMESPACE_TABLE_NAME.equals(info.getTable())) {
Cell cell = row.getColumnLatestCell(HConstants.CATALOG_FAMILY,
HConstants.SERVER_QUALIFIER);
ServerName sn = null;
if(cell != null) {
sn = ServerName.parseVersionedServerName(CellUtil.cloneValue(cell));
}
if (tsm.isTableState(TableName.NAMESPACE_TABLE_NAME,
TableState.State.ENABLED)) {
try {
ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
ClientProtos.GetRequest request =
RequestConverter.buildGetRequest(info.getRegionName(),
new Get(ROW_KEY));
rs.get(null, request);
nsFound.set(true);
} catch(Exception ex) {
LOG.debug("Caught exception while verifying group region", ex);
}
}
}
}
return true;
}
};
MetaTableAccessor.fullScanRegions(conn, visitor);
// if no regions in meta then we have to create the table
if (foundRegions.size() < 1 && rootMetaFound && !createSent && nsFound.get()) {
groupInfoManager.createGroupTable(masterServices);
createSent = true;
}
LOG.info("Group table: " + RSGROUP_TABLE_NAME + " isOnline: " + found.get()
+ ", regionCount: " + foundRegions.size() + ", assignCount: "
+ assignedRegions.size() + ", rootMetaFound: "+rootMetaFound);
found.set(found.get() && assignedRegions.size() == foundRegions.size()
&& foundRegions.size() > 0);
} else {
LOG.info("Waiting for catalog tables to come online");
found.set(false);
}
if (found.get()) {
LOG.debug("With group table online, refreshing cached information.");
groupInfoManager.refresh(true);
isOnline = true;
//flush any inconsistencies between ZK and HTable
groupInfoManager.flushConfig(groupInfoManager.rsGroupMap);
}
} catch(Exception e) {
found.set(false);
LOG.warn("Failed to perform check", e);
}
try {
Thread.sleep(100);
} catch (InterruptedException e) {
LOG.info("Sleep interrupted", e);
}
}
return found.get();
}
public boolean isOnline() {
return isOnline;
}
private boolean isMasterRunning() {
return !masterServices.isAborted() && !masterServices.isStopped();
}
}
private void createGroupTable(MasterServices masterServices) throws IOException {
HRegionInfo[] newRegions =
ModifyRegionUtils.createHRegionInfos(RSGROUP_TABLE_DESC, null);
ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
masterServices.getMasterProcedureExecutor().submitProcedure(
new CreateTableProcedure(
masterServices.getMasterProcedureExecutor().getEnvironment(),
RSGROUP_TABLE_DESC,
newRegions,
latch),
HConstants.NO_NONCE,
HConstants.NO_NONCE);
latch.await();
// wait for region to be online
int tries = 600;
while(masterServices.getAssignmentManager().getRegionStates()
.getRegionServerOfRegion(newRegions[0]) == null && tries > 0) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new IOException("Wait interrupted", e);
}
tries--;
}
if(tries <= 0) {
throw new IOException("Failed to create group table.");
}
}
private void multiMutate(List<Mutation> mutations)
throws IOException {
CoprocessorRpcChannel channel = rsGroupTable.coprocessorService(ROW_KEY);
MultiRowMutationProtos.MutateRowsRequest.Builder mmrBuilder
= MultiRowMutationProtos.MutateRowsRequest.newBuilder();
for (Mutation mutation : mutations) {
if (mutation instanceof Put) {
mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
ClientProtos.MutationProto.MutationType.PUT, mutation));
} else if (mutation instanceof Delete) {
mmrBuilder.addMutationRequest(ProtobufUtil.toMutation(
ClientProtos.MutationProto.MutationType.DELETE, mutation));
} else {
throw new DoNotRetryIOException("multiMutate doesn't support "
+ mutation.getClass().getName());
}
}
MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service =
MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel);
try {
service.mutateRows(null, mmrBuilder.build());
} catch (ServiceException ex) {
throw ProtobufUtil.toIOException(ex);
}
}
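// The puts and deletes above go through the MultiRowMutationEndpoint installed on
// the group table by the static initializer, so a whole flush is committed as one
// atomic mutation within the table's single region.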
private void checkGroupName(String groupName) throws ConstraintException {
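// e.g. "group_1" is accepted; "group-1" and "" are rejected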
if(!groupName.matches("[a-zA-Z0-9_]+")) {
throw new ConstraintException("Group name should only contain alphanumeric characters");
}
}
}
@@ -0,0 +1,88 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import com.google.common.collect.Lists;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
//TODO do better encapsulation of SerDe logic from GroupInfoManager and GroupTracker
public class RSGroupSerDe {
private static final Log LOG = LogFactory.getLog(RSGroupSerDe.class);
public RSGroupSerDe() {
}
public List<RSGroupInfo> retrieveGroupList(Table groupTable) throws IOException {
List<RSGroupInfo> RSGroupInfoList = Lists.newArrayList();
for (Result result : groupTable.getScanner(new Scan())) {
RSGroupProtos.RSGroupInfo proto =
RSGroupProtos.RSGroupInfo.parseFrom(
result.getValue(
RSGroupInfoManager.META_FAMILY_BYTES,
RSGroupInfoManager.META_QUALIFIER_BYTES));
RSGroupInfoList.add(ProtobufUtil.toGroupInfo(proto));
}
return RSGroupInfoList;
}
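// Note: cell values read here are raw RSGroupProtos.RSGroupInfo bytes; the PB magic
// prefix applies only to the ZK copy handled by the overload below.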
public List<RSGroupInfo> retrieveGroupList(ZooKeeperWatcher watcher,
String groupBasePath) throws IOException {
List<RSGroupInfo> RSGroupInfoList = Lists.newArrayList();
//Used in offline mode: the group info snapshotted in ZK stands in for the table copy
try {
if(ZKUtil.checkExists(watcher, groupBasePath) != -1) {
for(String znode: ZKUtil.listChildrenAndWatchForNewChildren(watcher, groupBasePath)) {
byte[] data = ZKUtil.getData(watcher, ZKUtil.joinZNode(groupBasePath, znode));
if(data.length > 0) {
ProtobufUtil.expectPBMagicPrefix(data);
ByteArrayInputStream bis = new ByteArrayInputStream(
data, ProtobufUtil.lengthOfPBMagic(), data.length - ProtobufUtil.lengthOfPBMagic());
RSGroupInfoList.add(ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)));
}
}
LOG.debug("Read ZK GroupInfo count:" + RSGroupInfoList.size());
}
} catch (KeeperException e) {
throw new IOException("Failed to read rsGroupZNode",e);
} catch (DeserializationException e) {
throw new IOException("Failed to read rsGroupZNode",e);
} catch (InterruptedException e) {
throw new IOException("Failed to read rsGroupZNode",e);
}
return RSGroupInfoList;
}
}
@@ -0,0 +1,29 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.master.LoadBalancer;
@InterfaceAudience.Private
public interface RSGroupableBalancer extends LoadBalancer {
}
@@ -0,0 +1,574 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.balancer;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Lists;
import com.google.common.net.HostAndPort;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
//TODO use stochastic based load balancer instead
@Category(SmallTests.class)
public class TestRSGroupBasedLoadBalancer {
private static final Log LOG = LogFactory.getLog(TestRSGroupBasedLoadBalancer.class);
private static RSGroupBasedLoadBalancer loadBalancer;
private static SecureRandom rand;
static String[] groups = new String[] { RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3",
"dg4" };
static TableName[] tables =
new TableName[] { TableName.valueOf("dt1"),
TableName.valueOf("dt2"),
TableName.valueOf("dt3"),
TableName.valueOf("dt4")};
static List<ServerName> servers;
static Map<String, RSGroupInfo> groupMap;
static Map<TableName, String> tableMap;
static List<HTableDescriptor> tableDescs;
int[] regionAssignment = new int[] { 2, 5, 7, 10, 4, 3, 1 };
static int regionId = 0;
@BeforeClass
public static void beforeAllTests() throws Exception {
rand = new SecureRandom();
servers = generateServers(7);
groupMap = constructGroupInfo(servers, groups);
tableMap = new HashMap<TableName, String>();
tableDescs = constructTableDesc();
Configuration conf = HBaseConfiguration.create();
conf.set("hbase.regions.slop", "0");
conf.set("hbase.group.grouploadbalancer.class", SimpleLoadBalancer.class.getCanonicalName());
loadBalancer = new RSGroupBasedLoadBalancer(getMockedGroupInfoManager());
loadBalancer.setMasterServices(getMockedMaster());
loadBalancer.setConf(conf);
loadBalancer.initialize();
}
/**
* Test the load balancing algorithm.
*
* Invariant is that all servers of the group should be hosting either floor(average) or
* ceiling(average)
*
* @throws Exception
*/
@Test
public void testBalanceCluster() throws Exception {
Map<ServerName, List<HRegionInfo>> servers = mockClusterServers();
ArrayListMultimap<String, ServerAndLoad> list = convertToGroupBasedMap(servers);
LOG.info("Mock Cluster : " + printStats(list));
List<RegionPlan> plans = loadBalancer.balanceCluster(servers);
ArrayListMultimap<String, ServerAndLoad> balancedCluster = reconcile(
list, plans);
LOG.info("Mock Balance : " + printStats(balancedCluster));
assertClusterAsBalanced(balancedCluster);
}
/**
* Invariant is that all servers of a group have load between floor(avg) and
* ceiling(avg) number of regions.
*/
private void assertClusterAsBalanced(
ArrayListMultimap<String, ServerAndLoad> groupLoadMap) {
for (String gName : groupLoadMap.keySet()) {
List<ServerAndLoad> groupLoad = groupLoadMap.get(gName);
int numServers = groupLoad.size();
int numRegions = 0;
int maxRegions = 0;
int minRegions = Integer.MAX_VALUE;
for (ServerAndLoad server : groupLoad) {
int nr = server.getLoad();
if (nr > maxRegions) {
maxRegions = nr;
}
if (nr < minRegions) {
minRegions = nr;
}
numRegions += nr;
}
if (maxRegions - minRegions < 2) {
// less than 2 between max and min, this group is already balanced
continue;
}
int min = numRegions / numServers;
int max = numRegions % numServers == 0 ? min : min + 1;
for (ServerAndLoad server : groupLoad) {
assertTrue(server.getLoad() <= max);
assertTrue(server.getLoad() >= min);
}
}
}
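// Worked example of the invariant above: 10 regions across 3 servers gives
// min = 10 / 3 = 3 and max = min + 1 = 4, so any per-server load outside {3, 4}
// fails the assertion.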
/**
* All regions have an assignment.
*
* @param regions
* @param servers
* @param assignments
* @throws java.io.IOException
* @throws java.io.FileNotFoundException
*/
private void assertImmediateAssignment(List<HRegionInfo> regions,
List<ServerName> servers,
Map<HRegionInfo, ServerName> assignments)
throws IOException {
for (HRegionInfo region : regions) {
assertTrue(assignments.containsKey(region));
ServerName server = assignments.get(region);
TableName tableName = region.getTable();
String groupName =
getMockedGroupInfoManager().getRSGroupOfTable(tableName);
assertTrue(StringUtils.isNotEmpty(groupName));
RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup(groupName);
assertTrue("Region is not correctly assigned to group servers.",
gInfo.containsServer(server.getHostPort()));
}
}
/**
* Tests the bulk assignment used during cluster startup.
*
* Round-robin. Should yield a balanced cluster so same invariant as the
* load balancer holds, all servers holding either floor(avg) or
* ceiling(avg).
*
* @throws Exception
*/
@Test
public void testBulkAssignment() throws Exception {
List<HRegionInfo> regions = randomRegions(25);
Map<ServerName, List<HRegionInfo>> assignments = loadBalancer
.roundRobinAssignment(regions, servers);
//test empty region/servers scenario
//this should not throw an NPE
loadBalancer.roundRobinAssignment(regions,
Collections.EMPTY_LIST);
//test regular scenario
assertTrue(assignments.keySet().size() == servers.size());
for (ServerName sn : assignments.keySet()) {
List<HRegionInfo> regionAssigned = assignments.get(sn);
for (HRegionInfo region : regionAssigned) {
TableName tableName = region.getTable();
String groupName =
getMockedGroupInfoManager().getRSGroupOfTable(tableName);
assertTrue(StringUtils.isNotEmpty(groupName));
RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup(
groupName);
assertTrue(
"Region is not correctly assigned to group servers.",
gInfo.containsServer(sn.getHostPort()));
}
}
ArrayListMultimap<String, ServerAndLoad> loadMap = convertToGroupBasedMap(assignments);
assertClusterAsBalanced(loadMap);
}
/**
* Test the cluster startup bulk assignment which attempts to retain
* assignment info.
*
* @throws Exception
*/
@Test
public void testRetainAssignment() throws Exception {
// Test simple case where all same servers are there
Map<ServerName, List<HRegionInfo>> currentAssignments = mockClusterServers();
Map<HRegionInfo, ServerName> inputForTest = new HashMap<HRegionInfo, ServerName>();
for (ServerName sn : currentAssignments.keySet()) {
for (HRegionInfo region : currentAssignments.get(sn)) {
inputForTest.put(region, sn);
}
}
//verify region->null server assignment is handled
inputForTest.put(randomRegions(1).get(0), null);
Map<ServerName, List<HRegionInfo>> newAssignment = loadBalancer
.retainAssignment(inputForTest, servers);
assertRetainedAssignment(inputForTest, servers, newAssignment);
}
/**
* Asserts a valid retained assignment plan.
* <p>
* Must meet the following conditions:
* <ul>
* <li>Every input region has an assignment, and to an online server
* <li>If a region had an existing assignment to a server with the same
* address as a currently online server, it will be assigned to it
* </ul>
*
* @param existing
* @param assignment
* @throws java.io.IOException
* @throws java.io.FileNotFoundException
*/
private void assertRetainedAssignment(
Map<HRegionInfo, ServerName> existing, List<ServerName> servers,
Map<ServerName, List<HRegionInfo>> assignment)
throws FileNotFoundException, IOException {
// Verify condition 1, every region assigned, and to online server
Set<ServerName> onlineServerSet = new TreeSet<ServerName>(servers);
Set<HRegionInfo> assignedRegions = new TreeSet<HRegionInfo>();
for (Map.Entry<ServerName, List<HRegionInfo>> a : assignment.entrySet()) {
assertTrue(
"Region assigned to server that was not listed as online",
onlineServerSet.contains(a.getKey()));
for (HRegionInfo r : a.getValue())
assignedRegions.add(r);
}
assertEquals(existing.size(), assignedRegions.size());
// Verify condition 2, every region must be assigned to correct server.
Set<String> onlineHostNames = new TreeSet<String>();
for (ServerName s : servers) {
onlineHostNames.add(s.getHostname());
}
for (Map.Entry<ServerName, List<HRegionInfo>> a : assignment.entrySet()) {
ServerName currentServer = a.getKey();
for (HRegionInfo r : a.getValue()) {
ServerName oldAssignedServer = existing.get(r);
TableName tableName = r.getTable();
String groupName =
getMockedGroupInfoManager().getRSGroupOfTable(tableName);
assertTrue(StringUtils.isNotEmpty(groupName));
RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup(
groupName);
assertTrue(
"Region is not correctly assigned to group servers.",
gInfo.containsServer(currentServer.getHostPort()));
if (oldAssignedServer != null
&& onlineHostNames.contains(oldAssignedServer
.getHostname())) {
// this region was previously assigned somewhere, and that
// host is still online; if the region moved, the old host
// must have been in a different group
if (!oldAssignedServer.getHostPort().equals(currentServer.getHostPort())) {
assertFalse(gInfo.containsServer(oldAssignedServer.getHostPort()));
}
}
}
}
}
private String printStats(
ArrayListMultimap<String, ServerAndLoad> groupBasedLoad) {
StringBuilder sb = new StringBuilder();
sb.append("\n");
for (String groupName : groupBasedLoad.keySet()) {
sb.append("Stats for group: " + groupName);
sb.append("\n");
sb.append(groupMap.get(groupName).getServers());
sb.append("\n");
List<ServerAndLoad> groupLoad = groupBasedLoad.get(groupName);
int numServers = groupLoad.size();
int totalRegions = 0;
sb.append("Per Server Load: \n");
for (ServerAndLoad sLoad : groupLoad) {
sb.append("Server :" + sLoad.getServerName() + " Load : "
+ sLoad.getLoad() + "\n");
totalRegions += sLoad.getLoad();
}
sb.append(" Group Statistics : \n");
float average = (float) totalRegions / numServers;
int max = (int) Math.ceil(average);
int min = (int) Math.floor(average);
sb.append("[srvr=" + numServers + " rgns=" + totalRegions + " avg="
+ average + " max=" + max + " min=" + min + "]");
sb.append("\n");
sb.append("===============================");
sb.append("\n");
}
return sb.toString();
}
private ArrayListMultimap<String, ServerAndLoad> convertToGroupBasedMap(
final Map<ServerName, List<HRegionInfo>> serversMap) throws IOException {
ArrayListMultimap<String, ServerAndLoad> loadMap = ArrayListMultimap
.create();
for (RSGroupInfo gInfo : getMockedGroupInfoManager().listRSGroups()) {
Set<HostAndPort> groupServers = gInfo.getServers();
for (HostAndPort hostPort : groupServers) {
ServerName actual = null;
for(ServerName entry: servers) {
if(entry.getHostPort().equals(hostPort)) {
actual = entry;
break;
}
}
List<HRegionInfo> regions = serversMap.get(actual);
assertTrue("No load for " + actual, regions != null);
loadMap.put(gInfo.getName(),
new ServerAndLoad(actual, regions.size()));
}
}
return loadMap;
}
private ArrayListMultimap<String, ServerAndLoad> reconcile(
ArrayListMultimap<String, ServerAndLoad> previousLoad,
List<RegionPlan> plans) {
ArrayListMultimap<String, ServerAndLoad> result = ArrayListMultimap
.create();
result.putAll(previousLoad);
if (plans != null) {
for (RegionPlan plan : plans) {
ServerName source = plan.getSource();
updateLoad(result, source, -1);
ServerName destination = plan.getDestination();
updateLoad(result, destination, +1);
}
}
return result;
}
private void updateLoad(
ArrayListMultimap<String, ServerAndLoad> previousLoad,
final ServerName sn, final int diff) {
for (String groupName : previousLoad.keySet()) {
ServerAndLoad newSAL = null;
ServerAndLoad oldSAL = null;
for (ServerAndLoad sal : previousLoad.get(groupName)) {
if (ServerName.isSameHostnameAndPort(sn, sal.getServerName())) {
oldSAL = sal;
newSAL = new ServerAndLoad(sn, sal.getLoad() + diff);
break;
}
}
if (newSAL != null) {
previousLoad.remove(groupName, oldSAL);
previousLoad.put(groupName, newSAL);
break;
}
}
}
private Map<ServerName, List<HRegionInfo>> mockClusterServers() throws IOException {
assertTrue(servers.size() == regionAssignment.length);
Map<ServerName, List<HRegionInfo>> assignment = new TreeMap<ServerName, List<HRegionInfo>>();
for (int i = 0; i < servers.size(); i++) {
int numRegions = regionAssignment[i];
List<HRegionInfo> regions = assignedRegions(numRegions, servers.get(i));
assignment.put(servers.get(i), regions);
}
return assignment;
}
/**
* Generate a list of regions evenly distributed between the tables.
*
* @param numRegions The number of regions to be generated.
* @return List of HRegionInfo.
*/
private List<HRegionInfo> randomRegions(int numRegions) {
List<HRegionInfo> regions = new ArrayList<HRegionInfo>(numRegions);
byte[] start = new byte[16];
byte[] end = new byte[16];
rand.nextBytes(start);
rand.nextBytes(end);
int regionIdx = rand.nextInt(tables.length);
for (int i = 0; i < numRegions; i++) {
Bytes.putInt(start, 0, numRegions << 1);
Bytes.putInt(end, 0, (numRegions << 1) + 1);
int tableIndex = (i + regionIdx) % tables.length;
HRegionInfo hri = new HRegionInfo(
tables[tableIndex], start, end, false, regionId++);
regions.add(hri);
}
return regions;
}
/**
* Generate assigned regions to a given server using group information.
*
* @param numRegions the num regions to generate
* @param sn the servername
* @return the list of regions
* @throws java.io.IOException Signals that an I/O exception has occurred.
*/
private List<HRegionInfo> assignedRegions(int numRegions, ServerName sn) throws IOException {
List<HRegionInfo> regions = new ArrayList<HRegionInfo>(numRegions);
byte[] start = new byte[16];
byte[] end = new byte[16];
Bytes.putInt(start, 0, numRegions << 1);
Bytes.putInt(end, 0, (numRegions << 1) + 1);
for (int i = 0; i < numRegions; i++) {
TableName tableName = getTableName(sn);
HRegionInfo hri = new HRegionInfo(
tableName, start, end, false,
regionId++);
regions.add(hri);
}
return regions;
}
private static List<ServerName> generateServers(int numServers) {
List<ServerName> servers = new ArrayList<ServerName>(numServers);
for (int i = 0; i < numServers; i++) {
String host = "server" + rand.nextInt(100000);
int port = rand.nextInt(60000);
servers.add(ServerName.valueOf(host, port, -1));
}
return servers;
}
/**
* Construct group info, with each group having at least one server.
*
* @param servers the servers
* @param groups the groups
* @return the map
*/
private static Map<String, RSGroupInfo> constructGroupInfo(
List<ServerName> servers, String[] groups) {
assertTrue(servers != null);
assertTrue(servers.size() >= groups.length);
int index = 0;
Map<String, RSGroupInfo> groupMap = new HashMap<String, RSGroupInfo>();
for (String grpName : groups) {
RSGroupInfo RSGroupInfo = new RSGroupInfo(grpName);
RSGroupInfo.addServer(servers.get(index).getHostPort());
groupMap.put(grpName, RSGroupInfo);
index++;
}
while (index < servers.size()) {
int grpIndex = rand.nextInt(groups.length);
groupMap.get(groups[grpIndex]).addServer(
servers.get(index).getHostPort());
index++;
}
return groupMap;
}
/**
* Construct table descriptors evenly distributed between the groups.
*
* @return the list
*/
private static List<HTableDescriptor> constructTableDesc() {
List<HTableDescriptor> tds = Lists.newArrayList();
int index = rand.nextInt(groups.length);
for (int i = 0; i < tables.length; i++) {
HTableDescriptor htd = new HTableDescriptor(tables[i]);
int grpIndex = (i + index) % groups.length;
String groupName = groups[grpIndex];
tableMap.put(tables[i], groupName);
tds.add(htd);
}
return tds;
}
private static MasterServices getMockedMaster() throws IOException {
TableDescriptors tds = Mockito.mock(TableDescriptors.class);
Mockito.when(tds.get(tables[0])).thenReturn(tableDescs.get(0));
Mockito.when(tds.get(tables[1])).thenReturn(tableDescs.get(1));
Mockito.when(tds.get(tables[2])).thenReturn(tableDescs.get(2));
Mockito.when(tds.get(tables[3])).thenReturn(tableDescs.get(3));
MasterServices services = Mockito.mock(HMaster.class);
Mockito.when(services.getTableDescriptors()).thenReturn(tds);
AssignmentManager am = Mockito.mock(AssignmentManager.class);
Mockito.when(services.getAssignmentManager()).thenReturn(am);
return services;
}
private static RSGroupInfoManager getMockedGroupInfoManager() throws IOException {
RSGroupInfoManager gm = Mockito.mock(RSGroupInfoManager.class);
Mockito.when(gm.getRSGroup(groups[0])).thenReturn(
groupMap.get(groups[0]));
Mockito.when(gm.getRSGroup(groups[1])).thenReturn(
groupMap.get(groups[1]));
Mockito.when(gm.getRSGroup(groups[2])).thenReturn(
groupMap.get(groups[2]));
Mockito.when(gm.getRSGroup(groups[3])).thenReturn(
groupMap.get(groups[3]));
Mockito.when(gm.listRSGroups()).thenReturn(
Lists.newLinkedList(groupMap.values()));
Mockito.when(gm.isOnline()).thenReturn(true);
Mockito.when(gm.getRSGroupOfTable(Mockito.any(TableName.class)))
.thenAnswer(new Answer<String>() {
@Override
public String answer(InvocationOnMock invocation) throws Throwable {
return tableMap.get(invocation.getArguments()[0]);
}
});
return gm;
}
private TableName getTableName(ServerName sn) throws IOException {
TableName tableName = null;
RSGroupInfoManager gm = getMockedGroupInfoManager();
RSGroupInfo groupOfServer = null;
for(RSGroupInfo gInfo : gm.listRSGroups()){
if(gInfo.containsServer(sn.getHostPort())){
groupOfServer = gInfo;
break;
}
}
for(HTableDescriptor desc : tableDescs){
if(gm.getRSGroupOfTable(desc.getTableName()).endsWith(groupOfServer.getName())){
tableName = desc.getTableName();
}
}
return tableName;
}
}
@@ -0,0 +1,287 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.google.common.net.HostAndPort;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@Category({MediumTests.class})
public class TestRSGroups extends TestRSGroupsBase {
protected static final Log LOG = LogFactory.getLog(TestRSGroups.class);
private static HMaster master;
private static boolean init = false;
private static RSGroupAdminEndpoint RSGroupAdminEndpoint;
@BeforeClass
public static void setUp() throws Exception {
TEST_UTIL = new HBaseTestingUtility();
TEST_UTIL.getConfiguration().set(
HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
RSGroupBasedLoadBalancer.class.getName());
TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
RSGroupAdminEndpoint.class.getName());
TEST_UTIL.getConfiguration().setBoolean(
HConstants.ZOOKEEPER_USEMULTI,
true);
TEST_UTIL.startMiniCluster(NUM_SLAVES_BASE);
TEST_UTIL.getConfiguration().set(
ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
""+NUM_SLAVES_BASE);
admin = TEST_UTIL.getHBaseAdmin();
cluster = TEST_UTIL.getHBaseCluster();
master = ((MiniHBaseCluster)cluster).getMaster();
//wait for balancer to come online
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return master.isInitialized() &&
((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline();
}
});
admin.setBalancerRunning(false,true);
rsGroupAdmin = new VerifyingRSGroupAdminClient(rsGroupAdmin.newClient(TEST_UTIL.getConnection()),
TEST_UTIL.getConfiguration());
RSGroupAdminEndpoint =
master.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class).get(0);
}
@AfterClass
public static void tearDown() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@Before
public void beforeMethod() throws Exception {
if(!init) {
init = true;
afterMethod();
}
}
@After
public void afterMethod() throws Exception {
deleteTableIfNecessary();
deleteNamespaceIfNecessary();
deleteGroups();
int missing = NUM_SLAVES_BASE - getNumServers();
LOG.info("Restoring servers: "+missing);
for(int i=0; i<missing; i++) {
((MiniHBaseCluster)cluster).startRegionServer();
}
rsGroupAdmin.addRSGroup("master");
ServerName masterServerName =
((MiniHBaseCluster)cluster).getMaster().getServerName();
try {
rsGroupAdmin.moveServers(
Sets.newHashSet(masterServerName.getHostPort()),
"master");
} catch (Exception ex) {
// ignore
}
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
LOG.info("Waiting for cleanup to finish " + rsGroupAdmin.listRSGroups());
//Count may momentarily exceed NUM_SLAVES_BASE, since servers are moved
//back to default only after the replacement servers have started
return rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size()
== NUM_SLAVES_BASE;
}
});
}
@Test
public void testBasicStartUp() throws IOException {
RSGroupInfo defaultInfo = rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP);
assertEquals(4, defaultInfo.getServers().size());
// Verify assignment of the system regions.
int count = master.getAssignmentManager().getRegionStates().getRegionAssignments().size();
//3 regions: hbase:meta, hbase:namespace, hbase:rsgroup
assertEquals(3, count);
}
@Test
public void testNamespaceCreateAndAssign() throws Exception {
LOG.info("testNamespaceCreateAndAssign");
String nsName = tablePrefix+"_foo";
final TableName tableName = TableName.valueOf(nsName, tablePrefix + "_testCreateAndAssign");
RSGroupInfo appInfo = addGroup(rsGroupAdmin, "appInfo", 1);
admin.createNamespace(NamespaceDescriptor.create(nsName)
.addConfiguration(RSGroupInfo.NAMESPACEDESC_PROP_GROUP, "appInfo").build());
final HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor("f"));
admin.createTable(desc);
//wait for created table to be assigned
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return getTableRegionMap().get(desc.getTableName()) != null;
}
});
ServerName targetServer =
ServerName.parseServerName(appInfo.getServers().iterator().next().toString());
AdminProtos.AdminService.BlockingInterface rs = admin.getConnection().getAdmin(targetServer);
//verify it was assigned to the right group
Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(rs).size());
}
@Test
public void testDefaultNamespaceCreateAndAssign() throws Exception {
LOG.info("testDefaultNamespaceCreateAndAssign");
final byte[] tableName = Bytes.toBytes(tablePrefix + "_testCreateAndAssign");
admin.modifyNamespace(NamespaceDescriptor.create("default")
.addConfiguration(RSGroupInfo.NAMESPACEDESC_PROP_GROUP, "default").build());
final HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor("f"));
admin.createTable(desc);
//wait for created table to be assigned
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return getTableRegionMap().get(desc.getTableName()) != null;
}
});
}
@Test
public void testNamespaceConstraint() throws Exception {
String nsName = tablePrefix+"_foo";
String groupName = tablePrefix+"_foo";
LOG.info("testNamespaceConstraint");
rsGroupAdmin.addRSGroup(groupName);
admin.createNamespace(NamespaceDescriptor.create(nsName)
.addConfiguration(RSGroupInfo.NAMESPACEDESC_PROP_GROUP, groupName)
.build());
//test removing a referenced group
try {
rsGroupAdmin.removeRSGroup(groupName);
fail("Expected a constraint exception");
} catch (IOException ex) {
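//expected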
}
//test modify group
//changing with the same name is fine
admin.modifyNamespace(
NamespaceDescriptor.create(nsName)
.addConfiguration(RSGroupInfo.NAMESPACEDESC_PROP_GROUP, groupName)
.build());
String anotherGroup = tablePrefix+"_anotherGroup";
rsGroupAdmin.addRSGroup(anotherGroup);
//test add non-existent group
admin.deleteNamespace(nsName);
rsGroupAdmin.removeRSGroup(groupName);
try {
admin.createNamespace(NamespaceDescriptor.create(nsName)
.addConfiguration(RSGroupInfo.NAMESPACEDESC_PROP_GROUP, "foo")
.build());
fail("Expected a constraint exception");
} catch (IOException ex) {
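//expected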
}
}
@Test
public void testGroupInfoMultiAccessing() throws Exception {
RSGroupInfoManager manager = RSGroupAdminEndpoint.getGroupInfoManager();
final RSGroupInfo defaultGroup = manager.getRSGroup("default");
// getRSGroup updates default group's server list
// this process must not affect other threads iterating the list
Iterator<HostAndPort> it = defaultGroup.getServers().iterator();
manager.getRSGroup("default");
it.next();
}
@Test
public void testMisplacedRegions() throws Exception {
final TableName tableName = TableName.valueOf(tablePrefix+"_testMisplacedRegions");
LOG.info("testMisplacedRegions");
final RSGroupInfo RSGroupInfo = addGroup(rsGroupAdmin, "testMisplacedRegions", 1);
TEST_UTIL.createMultiRegionTable(tableName, new byte[]{'f'}, 15);
TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
RSGroupAdminEndpoint.getGroupInfoManager()
.moveTables(Sets.newHashSet(tableName), RSGroupInfo.getName());
assertTrue(rsGroupAdmin.balanceRSGroup(RSGroupInfo.getName()));
TEST_UTIL.waitFor(60000, new Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
ServerName serverName =
ServerName.valueOf(RSGroupInfo.getServers().iterator().next().toString(), 1);
return admin.getConnection().getAdmin()
.getOnlineRegions(serverName).size() == 15;
}
});
}
}
@@ -0,0 +1,643 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.net.HostAndPort;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseCluster;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.constraint.ConstraintException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Assert;
import org.junit.Test;
import java.io.IOException;
import java.security.SecureRandom;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
public abstract class TestRSGroupsBase {
protected static final Log LOG = LogFactory.getLog(TestRSGroupsBase.class);
//shared
protected final static String groupPrefix = "Group";
protected final static String tablePrefix = "Group";
protected final static SecureRandom rand = new SecureRandom();
//shared, cluster type specific
protected static HBaseTestingUtility TEST_UTIL;
protected static HBaseAdmin admin;
protected static HBaseCluster cluster;
protected static RSGroupAdmin rsGroupAdmin;
public final static long WAIT_TIMEOUT = 60000*5;
public final static int NUM_SLAVES_BASE = 4; //number of slaves for the smallest cluster
protected RSGroupInfo addGroup(RSGroupAdmin gAdmin, String groupName,
int serverCount) throws IOException, InterruptedException {
RSGroupInfo defaultInfo = gAdmin
.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP);
assertTrue(defaultInfo != null);
assertTrue(defaultInfo.getServers().size() >= serverCount);
gAdmin.addRSGroup(groupName);
Set<HostAndPort> set = new HashSet<HostAndPort>();
for(HostAndPort server: defaultInfo.getServers()) {
if(set.size() == serverCount) {
break;
}
set.add(server);
}
gAdmin.moveServers(set, groupName);
RSGroupInfo result = gAdmin.getRSGroupInfo(groupName);
assertTrue(result.getServers().size() >= serverCount);
return result;
}
static void removeGroup(RSGroupAdminClient groupAdmin, String groupName) throws IOException {
RSGroupInfo RSGroupInfo = groupAdmin.getRSGroupInfo(groupName);
groupAdmin.moveTables(RSGroupInfo.getTables(), RSGroupInfo.DEFAULT_GROUP);
groupAdmin.moveServers(RSGroupInfo.getServers(), RSGroupInfo.DEFAULT_GROUP);
groupAdmin.removeRSGroup(groupName);
}
protected void deleteTableIfNecessary() throws IOException {
for (HTableDescriptor desc : TEST_UTIL.getHBaseAdmin().listTables(tablePrefix+".*")) {
TEST_UTIL.deleteTable(desc.getTableName());
}
}
protected void deleteNamespaceIfNecessary() throws IOException {
for (NamespaceDescriptor desc : TEST_UTIL.getHBaseAdmin().listNamespaceDescriptors()) {
if(desc.getName().startsWith(tablePrefix)) {
admin.deleteNamespace(desc.getName());
}
}
}
protected void deleteGroups() throws IOException {
RSGroupAdmin groupAdmin = rsGroupAdmin.newClient(TEST_UTIL.getConnection());
for(RSGroupInfo group: groupAdmin.listRSGroups()) {
if(!group.getName().equals(RSGroupInfo.DEFAULT_GROUP)) {
groupAdmin.moveTables(group.getTables(), RSGroupInfo.DEFAULT_GROUP);
groupAdmin.moveServers(group.getServers(), RSGroupInfo.DEFAULT_GROUP);
groupAdmin.removeRSGroup(group.getName());
}
}
}
public Map<TableName, List<String>> getTableRegionMap() throws IOException {
Map<TableName, List<String>> map = Maps.newTreeMap();
Map<TableName, Map<ServerName, List<String>>> tableServerRegionMap
= getTableServerRegionMap();
for(TableName tableName : tableServerRegionMap.keySet()) {
if(!map.containsKey(tableName)) {
map.put(tableName, new LinkedList<String>());
}
for(List<String> subset: tableServerRegionMap.get(tableName).values()) {
map.get(tableName).addAll(subset);
}
}
return map;
}
public Map<TableName, Map<ServerName, List<String>>> getTableServerRegionMap()
throws IOException {
Map<TableName, Map<ServerName, List<String>>> map = Maps.newTreeMap();
ClusterStatus status = TEST_UTIL.getHBaseClusterInterface().getClusterStatus();
for(ServerName serverName : status.getServers()) {
for(RegionLoad rl : status.getLoad(serverName).getRegionsLoad().values()) {
TableName tableName = HRegionInfo.getTable(rl.getName());
if(!map.containsKey(tableName)) {
map.put(tableName, new TreeMap<ServerName, List<String>>());
}
if(!map.get(tableName).containsKey(serverName)) {
map.get(tableName).put(serverName, new LinkedList<String>());
}
map.get(tableName).get(serverName).add(rl.getNameAsString());
}
}
return map;
}
@Test
public void testBogusArgs() throws Exception {
assertNull(rsGroupAdmin.getRSGroupInfoOfTable(TableName.valueOf("nonexistent")));
assertNull(rsGroupAdmin.getRSGroupOfServer(HostAndPort.fromParts("bogus",123)));
assertNull(rsGroupAdmin.getRSGroupInfo("bogus"));
try {
rsGroupAdmin.removeRSGroup("bogus");
fail("Expected removing bogus group to fail");
} catch(ConstraintException ex) {
//expected
}
try {
rsGroupAdmin.moveTables(Sets.newHashSet(TableName.valueOf("bogustable")), "bogus");
fail("Expected move with bogus group to fail");
} catch(ConstraintException ex) {
//expected
}
try {
rsGroupAdmin.moveServers(Sets.newHashSet(HostAndPort.fromParts("bogus",123)), "bogus");
fail("Expected move with bogus group to fail");
} catch(ConstraintException ex) {
//expected
}
try {
rsGroupAdmin.balanceRSGroup("bogus");
fail("Expected move with bogus group to fail");
} catch(ConstraintException ex) {
//expected
}
}
@Test
public void testCreateMultiRegion() throws IOException {
LOG.info("testCreateMultiRegion");
TableName tableName = TableName.valueOf(tablePrefix + "_testCreateMultiRegion");
byte[] end = {1,3,5,7,9};
byte[] start = {0,2,4,6,8};
byte[][] f = {Bytes.toBytes("f")};
TEST_UTIL.createTable(tableName, f,1,start,end,10);
}
@Test
public void testCreateAndDrop() throws Exception {
LOG.info("testCreateAndDrop");
final TableName tableName = TableName.valueOf(tablePrefix + "_testCreateAndDrop");
TEST_UTIL.createTable(tableName, Bytes.toBytes("cf"));
//wait for created table to be assigned
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return getTableRegionMap().get(tableName) != null;
}
});
TEST_UTIL.deleteTable(tableName);
}
@Test
public void testSimpleRegionServerMove() throws IOException,
InterruptedException {
LOG.info("testSimpleRegionServerMove");
int initNumGroups = rsGroupAdmin.listRSGroups().size();
RSGroupInfo appInfo = addGroup(rsGroupAdmin, getGroupName("testSimpleRegionServerMove"), 1);
RSGroupInfo adminInfo = addGroup(rsGroupAdmin, getGroupName("testSimpleRegionServerMove"), 1);
RSGroupInfo dInfo = rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP);
Assert.assertEquals(initNumGroups + 2, rsGroupAdmin.listRSGroups().size());
assertEquals(1, adminInfo.getServers().size());
assertEquals(1, appInfo.getServers().size());
assertEquals(getNumServers() - 2, dInfo.getServers().size());
rsGroupAdmin.moveServers(appInfo.getServers(),
RSGroupInfo.DEFAULT_GROUP);
rsGroupAdmin.removeRSGroup(appInfo.getName());
rsGroupAdmin.moveServers(adminInfo.getServers(),
RSGroupInfo.DEFAULT_GROUP);
rsGroupAdmin.removeRSGroup(adminInfo.getName());
Assert.assertEquals(rsGroupAdmin.listRSGroups().size(), initNumGroups);
}
// return the real number of region servers, excluding the master embedded region server in 2.0+
public int getNumServers() throws IOException {
ClusterStatus status = admin.getClusterStatus();
ServerName master = status.getMaster();
int count = 0;
for (ServerName sn : status.getServers()) {
if (!sn.equals(master)) {
count++;
}
}
return count;
}
@Test
public void testMoveServers() throws Exception {
LOG.info("testMoveServers");
//create groups and assign servers
addGroup(rsGroupAdmin, "bar", 3);
rsGroupAdmin.addRSGroup("foo");
RSGroupInfo barGroup = rsGroupAdmin.getRSGroupInfo("bar");
RSGroupInfo fooGroup = rsGroupAdmin.getRSGroupInfo("foo");
assertEquals(3, barGroup.getServers().size());
assertEquals(0, fooGroup.getServers().size());
//test fail bogus server move
try {
rsGroupAdmin.moveServers(Sets.newHashSet(HostAndPort.fromString("foo:9999")),"foo");
fail("Bogus servers shouldn't have been successfully moved.");
} catch(IOException ex) {
String exp = "Server foo:9999 does not have a group.";
String msg = "Expected '"+exp+"' in exception message: ";
assertTrue(msg+" "+ex.getMessage(), ex.getMessage().contains(exp));
}
//test success case
LOG.info("moving servers "+barGroup.getServers()+" to group foo");
rsGroupAdmin.moveServers(barGroup.getServers(), fooGroup.getName());
barGroup = rsGroupAdmin.getRSGroupInfo("bar");
fooGroup = rsGroupAdmin.getRSGroupInfo("foo");
assertEquals(0,barGroup.getServers().size());
assertEquals(3,fooGroup.getServers().size());
LOG.info("moving servers "+fooGroup.getServers()+" to group default");
rsGroupAdmin.moveServers(fooGroup.getServers(), RSGroupInfo.DEFAULT_GROUP);
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return getNumServers() ==
rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size();
}
});
fooGroup = rsGroupAdmin.getRSGroupInfo("foo");
assertEquals(0,fooGroup.getServers().size());
//test group removal
LOG.info("Remove group "+barGroup.getName());
rsGroupAdmin.removeRSGroup(barGroup.getName());
Assert.assertEquals(null, rsGroupAdmin.getRSGroupInfo(barGroup.getName()));
LOG.info("Remove group "+fooGroup.getName());
rsGroupAdmin.removeRSGroup(fooGroup.getName());
Assert.assertEquals(null, rsGroupAdmin.getRSGroupInfo(fooGroup.getName()));
}
@Test
public void testTableMoveTruncateAndDrop() throws Exception {
LOG.info("testTableMove");
final TableName tableName = TableName.valueOf(tablePrefix + "_testTableMoveAndDrop");
final byte[] familyNameBytes = Bytes.toBytes("f");
String newGroupName = getGroupName("testTableMove");
final RSGroupInfo newGroup = addGroup(rsGroupAdmin, newGroupName, 2);
TEST_UTIL.createMultiRegionTable(tableName, familyNameBytes, 5);
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
List<String> regions = getTableRegionMap().get(tableName);
if (regions == null)
return false;
return getTableRegionMap().get(tableName).size() >= 5;
}
});
RSGroupInfo tableGrp = rsGroupAdmin.getRSGroupInfoOfTable(tableName);
assertTrue(tableGrp.getName().equals(RSGroupInfo.DEFAULT_GROUP));
//change table's group
LOG.info("Moving table "+tableName+" to "+newGroup.getName());
rsGroupAdmin.moveTables(Sets.newHashSet(tableName), newGroup.getName());
//verify group change
Assert.assertEquals(newGroup.getName(),
rsGroupAdmin.getRSGroupInfoOfTable(tableName).getName());
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
Map<ServerName, List<String>> serverMap = getTableServerRegionMap().get(tableName);
int count = 0;
if (serverMap != null) {
for (ServerName rs : serverMap.keySet()) {
if (newGroup.containsServer(rs.getHostPort())) {
count += serverMap.get(rs).size();
}
}
}
return count == 5;
}
});
//test truncate
admin.disableTable(tableName);
admin.truncateTable(tableName, true);
Assert.assertEquals(1, rsGroupAdmin.getRSGroupInfo(newGroup.getName()).getTables().size());
Assert.assertEquals(tableName, rsGroupAdmin.getRSGroupInfo(
newGroup.getName()).getTables().first());
//verify removed table is removed from group
TEST_UTIL.deleteTable(tableName);
Assert.assertEquals(0, rsGroupAdmin.getRSGroupInfo(newGroup.getName()).getTables().size());
}
@Test
public void testGroupBalance() throws Exception {
LOG.info("testGroupBalance");
String newGroupName = getGroupName("testGroupBalance");
final RSGroupInfo newGroup = addGroup(rsGroupAdmin, newGroupName, 3);
final TableName tableName = TableName.valueOf(tablePrefix+"_ns", "testGroupBalance");
admin.createNamespace(
NamespaceDescriptor.create(tableName.getNamespaceAsString())
.addConfiguration(RSGroupInfo.NAMESPACEDESC_PROP_GROUP, newGroupName).build());
final byte[] familyNameBytes = Bytes.toBytes("f");
final HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor("f"));
byte [] startKey = Bytes.toBytes("aaaaa");
byte [] endKey = Bytes.toBytes("zzzzz");
admin.createTable(desc, startKey, endKey, 6);
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
List<String> regions = getTableRegionMap().get(tableName);
if (regions == null) {
return false;
}
return regions.size() >= 6;
}
});
//make assignment uneven, move all regions to one server
Map<ServerName,List<String>> assignMap =
getTableServerRegionMap().get(tableName);
final ServerName first = assignMap.entrySet().iterator().next().getKey();
for(HRegionInfo region: admin.getTableRegions(tableName)) {
if(!assignMap.get(first).contains(region.getRegionNameAsString())) {
admin.move(region.getEncodedNameAsBytes(), Bytes.toBytes(first.getServerName()));
}
}
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
Map<ServerName, List<String>> map = getTableServerRegionMap().get(tableName);
if (map == null) {
return true;
}
List<String> regions = map.get(first);
if (regions == null) {
return true;
}
return regions.size() >= 6;
}
});
//balance the other group and make sure it doesn't affect the new group
rsGroupAdmin.balanceRSGroup(RSGroupInfo.DEFAULT_GROUP);
assertEquals(6, getTableServerRegionMap().get(tableName).get(first).size());
rsGroupAdmin.balanceRSGroup(newGroupName);
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
for (List<String> regions : getTableServerRegionMap().get(tableName).values()) {
if (2 != regions.size()) {
return false;
}
}
return true;
}
});
}
@Test
public void testRegionMove() throws Exception {
LOG.info("testRegionMove");
final RSGroupInfo newGroup = addGroup(rsGroupAdmin, getGroupName("testRegionMove"), 1);
final TableName tableName = TableName.valueOf(tablePrefix + rand.nextInt());
final byte[] familyNameBytes = Bytes.toBytes("f");
// All the regions created below will be assigned to the default group.
TEST_UTIL.createMultiRegionTable(tableName, familyNameBytes, 6);
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
List<String> regions = getTableRegionMap().get(tableName);
if (regions == null)
return false;
return getTableRegionMap().get(tableName).size() >= 6;
}
});
//get target region to move
Map<ServerName,List<String>> assignMap =
getTableServerRegionMap().get(tableName);
String targetRegion = null;
for(ServerName server : assignMap.keySet()) {
targetRegion = assignMap.get(server).size() > 0 ? assignMap.get(server).get(0) : null;
if(targetRegion != null) {
break;
}
}
//get server which is not a member of new group
ServerName targetServer = null;
for(ServerName server : admin.getClusterStatus().getServers()) {
if(!newGroup.containsServer(server.getHostPort())) {
targetServer = server;
break;
}
}
final AdminProtos.AdminService.BlockingInterface targetRS =
admin.getConnection().getAdmin(targetServer);
//move target server to group
rsGroupAdmin.moveServers(Sets.newHashSet(targetServer.getHostPort()),
newGroup.getName());
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return ProtobufUtil.getOnlineRegions(targetRS).size() <= 0;
}
});
// Lets move this region to the new group.
TEST_UTIL.getHBaseAdmin().move(Bytes.toBytes(HRegionInfo.encodeRegionName(Bytes.toBytes(targetRegion))),
Bytes.toBytes(targetServer.getServerName()));
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return
getTableRegionMap().get(tableName) != null &&
getTableRegionMap().get(tableName).size() == 6 &&
admin.getClusterStatus().getRegionsInTransition().size() < 1;
}
});
//verify that targetServer didn't open it
for (HRegionInfo region : ProtobufUtil.getOnlineRegions(targetRS)) {
assertFalse(region.getRegionNameAsString().equals(targetRegion));
}
}
@Test
public void testFailRemoveGroup() throws IOException, InterruptedException {
LOG.info("testFailRemoveGroup");
int initNumGroups = rsGroupAdmin.listRSGroups().size();
addGroup(rsGroupAdmin, "bar", 3);
TableName tableName = TableName.valueOf(tablePrefix+"_my_table");
TEST_UTIL.createTable(tableName, Bytes.toBytes("f"));
rsGroupAdmin.moveTables(Sets.newHashSet(tableName), "bar");
RSGroupInfo barGroup = rsGroupAdmin.getRSGroupInfo("bar");
//group is not empty therefore it should fail
try {
rsGroupAdmin.removeRSGroup(barGroup.getName());
fail("Expected remove group to fail");
} catch(IOException e) {
}
//group cannot lose all its servers, therefore it should fail
try {
rsGroupAdmin.moveServers(barGroup.getServers(), RSGroupInfo.DEFAULT_GROUP);
fail("Expected move servers to fail");
} catch(IOException e) {
}
rsGroupAdmin.moveTables(barGroup.getTables(), RSGroupInfo.DEFAULT_GROUP);
try {
rsGroupAdmin.removeRSGroup(barGroup.getName());
fail("Expected move servers to fail");
} catch(IOException e) {
}
rsGroupAdmin.moveServers(barGroup.getServers(), RSGroupInfo.DEFAULT_GROUP);
rsGroupAdmin.removeRSGroup(barGroup.getName());
Assert.assertEquals(initNumGroups, rsGroupAdmin.listRSGroups().size());
}
@Test
public void testKillRS() throws Exception {
LOG.info("testKillRS");
RSGroupInfo appInfo = addGroup(rsGroupAdmin, "appInfo", 1);
final TableName tableName = TableName.valueOf(tablePrefix+"_ns", "_testKillRS");
admin.createNamespace(
NamespaceDescriptor.create(tableName.getNamespaceAsString())
.addConfiguration(RSGroupInfo.NAMESPACEDESC_PROP_GROUP, appInfo.getName()).build());
final HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor("f"));
admin.createTable(desc);
//wait for created table to be assigned
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return getTableRegionMap().get(desc.getTableName()) != null;
}
});
ServerName targetServer = ServerName.parseServerName(
appInfo.getServers().iterator().next().toString());
AdminProtos.AdminService.BlockingInterface targetRS =
admin.getConnection().getAdmin(targetServer);
HRegionInfo targetRegion = ProtobufUtil.getOnlineRegions(targetRS).get(0);
Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(targetRS).size());
try {
//stopping may cause an exception
//due to the connection loss
targetRS.stopServer(null,
AdminProtos.StopServerRequest.newBuilder().setReason("Die").build());
} catch(Exception e) {
}
assertFalse(cluster.getClusterStatus().getServers().contains(targetServer));
//wait for created table to be assigned
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return cluster.getClusterStatus().getRegionsInTransition().size() == 0;
}
});
Set<HostAndPort> newServers = Sets.newHashSet();
newServers.add(
rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().iterator().next());
rsGroupAdmin.moveServers(newServers, appInfo.getName());
//Make sure all the table's regions get reassigned
//disabling the table guarantees no conflicting assign/unassign (ie SSH) happens
admin.disableTable(tableName);
admin.enableTable(tableName);
//wait for region to be assigned
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return cluster.getClusterStatus().getRegionsInTransition().size() == 0;
}
});
targetServer = ServerName.parseServerName(
newServers.iterator().next().toString());
targetRS =
admin.getConnection().getAdmin(targetServer);
Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(targetRS).size());
Assert.assertEquals(tableName,
ProtobufUtil.getOnlineRegions(targetRS).get(0).getTable());
}
@Test
public void testValidGroupNames() throws IOException {
String[] badNames = {"foo*","foo@","-"};
String[] goodNames = {"foo_123"};
for(String entry: badNames) {
try {
rsGroupAdmin.addRSGroup(entry);
fail("Expected a constraint exception for: "+entry);
} catch(ConstraintException ex) {
//expected
}
}
for(String entry: goodNames) {
rsGroupAdmin.addRSGroup(entry);
}
}
private String getGroupName(String baseName) {
return groupPrefix+"_"+baseName+"_"+rand.nextInt(Integer.MAX_VALUE);
}
}
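Taken together, the helpers and tests above exercise the full RSGroupAdmin client surface. As a rough, self-contained sketch of the same calls from application code (the group, table, and host names here are hypothetical, and error handling is omitted):

import com.google.common.collect.Sets;
import com.google.common.net.HostAndPort;
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdmin;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;

public class RSGroupWorkflowSketch {
  // Drives the same admin calls the tests above verify, against a hypothetical group.
  static void runWorkflow(RSGroupAdmin admin) throws IOException {
    admin.addRSGroup("app_group"); // create an empty group
    // move one hypothetical region server out of the default group
    admin.moveServers(
        Sets.newHashSet(HostAndPort.fromParts("rs1.example.com", 16020)), "app_group");
    // pin a hypothetical table to the group, then balance only within that group
    admin.moveTables(Sets.newHashSet(TableName.valueOf("my_table")), "app_group");
    admin.balanceRSGroup("app_group");
    RSGroupInfo info = admin.getRSGroupInfo("app_group");
    System.out.println("app_group servers: " + info.getServers());
  }
}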


@ -0,0 +1,187 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import com.google.common.collect.Sets;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseCluster;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
//This tests that the group-based load balancer uses the group metadata
//persisted in ZooKeeper to do balancing during master startup.
//It does not test retained assignment.
@Category(MediumTests.class)
public class TestRSGroupsOfflineMode {
private static final org.apache.commons.logging.Log LOG =
LogFactory.getLog(TestRSGroupsOfflineMode.class);
private static HMaster master;
private static HBaseAdmin hbaseAdmin;
private static HBaseTestingUtility TEST_UTIL;
private static HBaseCluster cluster;
private static RSGroupAdminEndpoint rsGroupAdminEndpoint;
public final static long WAIT_TIMEOUT = 60000*5;
@BeforeClass
public static void setUp() throws Exception {
TEST_UTIL = new HBaseTestingUtility();
TEST_UTIL.getConfiguration().set(
HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
RSGroupBasedLoadBalancer.class.getName());
TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
RSGroupAdminEndpoint.class.getName());
TEST_UTIL.getConfiguration().set(
ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
"1");
TEST_UTIL.startMiniCluster(2, 3);
cluster = TEST_UTIL.getHBaseCluster();
master = ((MiniHBaseCluster)cluster).getMaster();
master.balanceSwitch(false);
hbaseAdmin = TEST_UTIL.getHBaseAdmin();
//wait till the balancer is in online mode
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return master.isInitialized() &&
((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline() &&
master.getServerManager().getOnlineServersList().size() >= 3;
}
});
rsGroupAdminEndpoint =
master.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class).get(0);
}
@AfterClass
public static void tearDown() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@Test
public void testOffline() throws Exception {
//the test table name must sort after the rsgroup table name
//so that its regions get assigned later
final TableName failoverTable = TableName.valueOf("testOffline");
TEST_UTIL.createTable(failoverTable, Bytes.toBytes("f"));
RSGroupAdmin groupAdmin = RSGroupAdmin.newClient(TEST_UTIL.getConnection());
final HRegionServer killRS = ((MiniHBaseCluster)cluster).getRegionServer(0);
final HRegionServer groupRS = ((MiniHBaseCluster)cluster).getRegionServer(1);
final HRegionServer failoverRS = ((MiniHBaseCluster)cluster).getRegionServer(2);
String newGroup = "my_group";
groupAdmin.addRSGroup(newGroup);
if(master.getAssignmentManager().getRegionStates().getRegionAssignments()
.containsValue(failoverRS.getServerName())) {
for(HRegionInfo regionInfo: hbaseAdmin.getOnlineRegions(failoverRS.getServerName())) {
hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(),
Bytes.toBytes(failoverRS.getServerName().getServerName()));
}
LOG.info("Waiting for region unassignments on failover RS...");
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return master.getServerManager().getLoad(failoverRS.getServerName())
.getRegionsLoad().size() > 0;
}
});
}
//move server to group and make sure all tables are assigned
groupAdmin.moveServers(Sets.newHashSet(groupRS.getServerName().getHostPort()), newGroup);
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return groupRS.getNumberOfOnlineRegions() < 1 &&
master.getAssignmentManager().getRegionStates().getRegionsInTransition().size() < 1;
}
});
//move table to group and wait
groupAdmin.moveTables(Sets.newHashSet(RSGroupInfoManager.RSGROUP_TABLE_NAME), newGroup);
LOG.info("Waiting for move table...");
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return groupRS.getNumberOfOnlineRegions() == 1;
}
});
groupRS.stop("die");
//race condition here
TEST_UTIL.getHBaseCluster().getMaster().stopMaster();
LOG.info("Waiting for offline mode...");
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return TEST_UTIL.getHBaseCluster().getMaster() != null &&
TEST_UTIL.getHBaseCluster().getMaster().isActiveMaster() &&
TEST_UTIL.getHBaseCluster().getMaster().isInitialized() &&
TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getOnlineServers().size()
<= 3;
}
});
RSGroupInfoManager groupMgr = rsGroupAdminEndpoint.getGroupInfoManager();
//make sure balancer is in offline mode, since this is what we're testing
assertFalse(groupMgr.isOnline());
//verify the group affiliation that's loaded from ZK instead of tables
assertEquals(newGroup,
groupMgr.getRSGroupOfTable(RSGroupInfoManager.RSGROUP_TABLE_NAME));
assertEquals(RSGroupInfo.DEFAULT_GROUP, groupMgr.getRSGroupOfTable(failoverTable));
//kill the last region server to verify that failover happens for all tables
//except the rsgroup table, since its group no longer has any online RS
killRS.stop("die");
master = TEST_UTIL.getHBaseCluster().getMaster();
LOG.info("Waiting for new table assignment...");
TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return failoverRS.getOnlineRegions(failoverTable).size() >= 1;
}
});
Assert.assertEquals(0, failoverRS.getOnlineRegions(RSGroupInfoManager.RSGROUP_TABLE_NAME).size());
//need this for minicluster to shutdown cleanly
master.stopMaster();
}
}


@ -0,0 +1,149 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.rsgroup;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.net.HostAndPort;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
import org.junit.Assert;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class VerifyingRSGroupAdminClient extends RSGroupAdmin {
private Table table;
private ZooKeeperWatcher zkw;
private RSGroupSerDe serDe;
private RSGroupAdmin wrapped;
public VerifyingRSGroupAdminClient(RSGroupAdmin toWrap, Configuration conf)
throws IOException {
wrapped = toWrap;
table = ConnectionFactory.createConnection(conf).getTable(RSGroupInfoManager.RSGROUP_TABLE_NAME);
zkw = new ZooKeeperWatcher(conf, this.getClass().getSimpleName(), null);
serDe = new RSGroupSerDe();
}
@Override
public void addRSGroup(String groupName) throws IOException {
wrapped.addRSGroup(groupName);
verify();
}
@Override
public RSGroupInfo getRSGroupInfo(String groupName) throws IOException {
return wrapped.getRSGroupInfo(groupName);
}
@Override
public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException {
return wrapped.getRSGroupInfoOfTable(tableName);
}
@Override
public void moveServers(Set<HostAndPort> servers, String targetGroup) throws IOException {
wrapped.moveServers(servers, targetGroup);
verify();
}
@Override
public void moveTables(Set<TableName> tables, String targetGroup) throws IOException {
wrapped.moveTables(tables, targetGroup);
verify();
}
@Override
public void removeRSGroup(String name) throws IOException {
wrapped.removeRSGroup(name);
verify();
}
@Override
public boolean balanceRSGroup(String name) throws IOException {
return wrapped.balanceRSGroup(name);
}
@Override
public List<RSGroupInfo> listRSGroups() throws IOException {
return wrapped.listRSGroups();
}
@Override
public RSGroupInfo getRSGroupOfServer(HostAndPort hostPort) throws IOException {
return wrapped.getRSGroupOfServer(hostPort);
}
public void verify() throws IOException {
Map<String, RSGroupInfo> groupMap = Maps.newHashMap();
Set<RSGroupInfo> zList = Sets.newHashSet();
for (Result result : table.getScanner(new Scan())) {
RSGroupProtos.RSGroupInfo proto =
RSGroupProtos.RSGroupInfo.parseFrom(
result.getValue(
RSGroupInfoManager.META_FAMILY_BYTES,
RSGroupInfoManager.META_QUALIFIER_BYTES));
groupMap.put(proto.getName(), ProtobufUtil.toGroupInfo(proto));
}
Assert.assertEquals(Sets.newHashSet(groupMap.values()),
Sets.newHashSet(wrapped.listRSGroups()));
try {
String groupBasePath = ZKUtil.joinZNode(zkw.baseZNode, "rsgroup");
for(String znode: ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) {
byte[] data = ZKUtil.getData(zkw, ZKUtil.joinZNode(groupBasePath, znode));
if(data.length > 0) {
ProtobufUtil.expectPBMagicPrefix(data);
ByteArrayInputStream bis = new ByteArrayInputStream(
data, ProtobufUtil.lengthOfPBMagic(), data.length);
zList.add(ProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis)));
}
}
Assert.assertEquals(zList.size(), groupMap.size());
for(RSGroupInfo groupInfo : zList) {
Assert.assertTrue(groupMap.get(groupInfo.getName()).equals(groupInfo));
}
} catch (KeeperException e) {
throw new IOException("ZK verification failed", e);
} catch (DeserializationException e) {
throw new IOException("ZK verification failed", e);
} catch (InterruptedException e) {
throw new IOException("ZK verification failed", e);
}
}
@Override
public void close() throws IOException {
}
}
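VerifyingRSGroupAdminClient acts as a verifying decorator: every mutation is delegated to the wrapped admin, after which verify() cross-checks the hbase:rsgroup table contents against the rsgroup znodes. A minimal usage sketch, assuming an existing RSGroupAdmin (rsGroupAdmin) and cluster Configuration (conf) as in the tests above:

// A minimal sketch; "audited_group" is a hypothetical group name.
RSGroupAdmin verifying = new VerifyingRSGroupAdminClient(rsGroupAdmin, conf);
verifying.addRSGroup("audited_group");    // applied through the wrapped admin, then verify() runs
verifying.removeRSGroup("audited_group"); // each mutation cross-checks table and ZK state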


@ -30,30 +30,30 @@ AssignmentManager assignmentManager = null;
</%args>
<%import>
java.util.*;
org.apache.hadoop.hbase.HBaseConfiguration;
org.apache.hadoop.hbase.HConstants;
org.apache.hadoop.hbase.HRegionInfo;
org.apache.hadoop.hbase.HTableDescriptor;
org.apache.hadoop.hbase.NamespaceDescriptor;
org.apache.hadoop.hbase.ServerLoad;
org.apache.hadoop.hbase.ServerName;
org.apache.hadoop.hbase.TableName;
org.apache.hadoop.hbase.client.Admin;
org.apache.hadoop.hbase.master.AssignmentManager;
org.apache.hadoop.hbase.master.DeadServer;
org.apache.hadoop.hbase.master.HMaster;
org.apache.hadoop.hbase.master.RegionState;
org.apache.hadoop.hbase.master.ServerManager;
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
org.apache.hadoop.hbase.quotas.QuotaUtil;
org.apache.hadoop.hbase.security.access.AccessControlLists;
org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
org.apache.hadoop.hbase.tool.Canary;
org.apache.hadoop.hbase.util.Bytes;
org.apache.hadoop.hbase.util.FSUtils;
org.apache.hadoop.hbase.util.JvmVersion;
org.apache.hadoop.util.StringUtils;
</%import>
<%if format.equals("json") %>
@ -380,6 +380,8 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
} else if (tableName.equals(QuotaUtil.QUOTA_TABLE_NAME)){
description = "The hbase:quota table holds quota information about number" +
" or size of requests in a given time frame.";
} else if (tableName.equals(TableName.valueOf("hbase:rsgroup"))){
description = "The hbase:rsgroup table holds information about regionserver groups";
}
</%java>
<td><% description %></td>


@ -19,8 +19,11 @@
package org.apache.hadoop.hbase.coprocessor;
import com.google.common.net.HostAndPort;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@ -631,4 +634,54 @@ public abstract class BaseMasterAndRegionObserver extends BaseRegionObserver
public void postSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace, final Quotas quotas) throws IOException {
}
@Override
public void postAddRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
throws IOException {
}
@Override
public void postBalanceRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
String groupName, boolean balancerRan) throws IOException {
}
@Override
public void postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx, Set<HostAndPort>
servers, String targetGroup) throws IOException {
}
@Override
public void postMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx, Set<TableName>
tables, String targetGroup) throws IOException {
}
@Override
public void postRemoveRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
throws IOException {
}
@Override
public void preAddRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
throws IOException {
}
@Override
public void preBalanceRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String groupName)
throws IOException {
}
@Override
public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<HostAndPort> servers, String targetGroup) throws IOException {
}
@Override
public void preMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<TableName> tables, String targetGroup) throws IOException {
}
@Override
public void preRemoveRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
throws IOException {
}
}


@ -19,8 +19,11 @@
package org.apache.hadoop.hbase.coprocessor;
import com.google.common.net.HostAndPort;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@ -634,4 +637,55 @@ public class BaseMasterObserver implements MasterObserver {
public void postSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace, final Quotas quotas) throws IOException {
}
@Override
public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx, Set<HostAndPort>
servers, String targetGroup) throws IOException {
}
@Override
public void postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx, Set<HostAndPort>
servers, String targetGroup) throws IOException {
}
@Override
public void preMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx, Set<TableName>
tables, String targetGroup) throws IOException {
}
@Override
public void postMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<TableName> tables, String targetGroup) throws IOException {
}
@Override
public void preAddRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
throws IOException {
}
@Override
public void postAddRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
throws IOException {
}
@Override
public void preRemoveRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
throws IOException {
}
@Override
public void postRemoveRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
throws IOException {
}
@Override
public void preBalanceRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String groupName)
throws IOException {
}
@Override
public void postBalanceRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
String groupName, boolean balancerRan) throws IOException {
}
}


@ -19,8 +19,11 @@
package org.apache.hadoop.hbase.coprocessor;
import com.google.common.net.HostAndPort;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@ -1238,4 +1241,99 @@ public interface MasterObserver extends Coprocessor {
*/
void postDispatchMerge(final ObserverContext<MasterCoprocessorEnvironment> c,
final HRegionInfo regionA, final HRegionInfo regionB) throws IOException;
/**
* Called before servers are moved to target region server group
* @param ctx the environment to interact with the framework and master
* @param servers set of servers to move
* @param targetGroup destination group
* @throws IOException on failure
*/
void preMoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<HostAndPort> servers, String targetGroup) throws IOException;
/**
* Called after servers are moved to target region server group
* @param ctx the environment to interact with the framework and master
* @param servers set of servers to move
* @param targetGroup name of group
* @throws IOException on failure
*/
void postMoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<HostAndPort> servers, String targetGroup) throws IOException;
/**
* Called before tables are moved to target region server group
* @param ctx the environment to interact with the framework and master
* @param tables set of tables to move
* @param targetGroup name of group
* @throws IOException on failure
*/
void preMoveTables(final ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<TableName> tables, String targetGroup) throws IOException;
/**
* Called after tables are moved to target region server group
* @param ctx the environment to interact with the framework and master
* @param tables set of tables to move
* @param targetGroup name of group
* @throws IOException on failure
*/
void postMoveTables(final ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<TableName> tables, String targetGroup) throws IOException;
/**
* Called before a new region server group is added
* @param ctx the environment to interact with the framework and master
* @param name group name
* @throws IOException on failure
*/
void preAddRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String name) throws IOException;
/**
* Called after a new region server group is added
* @param ctx the environment to interact with the framework and master
* @param name group name
* @throws IOException on failure
*/
void postAddRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String name) throws IOException;
/**
* Called before a region server group is removed
* @param ctx the environment to interact with the framework and master
* @param name group name
* @throws IOException on failure
*/
void preRemoveRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String name) throws IOException;
/**
* Called after a region server group is removed
* @param ctx the environment to interact with the framework and master
* @param name group name
* @throws IOException on failure
*/
void postRemoveRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String name) throws IOException;
/**
* Called before a region server group is balanced
* @param ctx the environment to interact with the framework and master
* @param groupName group name
* @throws IOException on failure
*/
void preBalanceRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String groupName) throws IOException;
/**
* Called after a region server group is balanced
* @param ctx the environment to interact with the framework and master
* @param groupName group name
* @throws IOException on failure
*/
void postBalanceRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String groupName, boolean balancerRan) throws IOException;
}
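These hooks follow the same pre/post pattern as the rest of MasterObserver, so a coprocessor can audit or veto group operations before the master applies them. A minimal sketch of such an observer, building on the BaseMasterObserver stubs above (the class and group names are hypothetical):

import com.google.common.net.HostAndPort;
import java.io.IOException;
import java.util.Set;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

public class RestrictedRSGroupObserver extends BaseMasterObserver {
  @Override
  public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
      Set<HostAndPort> servers, String targetGroup) throws IOException {
    // Throwing from a pre-hook aborts the operation; "reserved" is a hypothetical group name.
    if ("reserved".equals(targetGroup)) {
      throw new IOException("moving servers into the reserved group is not allowed");
    }
  }
}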


@ -1078,7 +1078,7 @@ public class AssignmentManager {
return;
}
LOG.info("Assigning " + region.getRegionNameAsString() +
" to " + plan.getDestination());
// Transition RegionState to PENDING_OPEN
regionStates.updateRegionState(region,
State.PENDING_OPEN, plan.getDestination());
@ -1267,8 +1267,13 @@ public class AssignmentManager {
|| existingPlan.getDestination() == null
|| !destServers.contains(existingPlan.getDestination())) {
newPlan = true;
try {
randomPlan = new RegionPlan(region, null,
balancer.randomAssignment(region, destServers));
} catch (IOException ex) {
LOG.warn("Failed to create new plan.",ex);
return null;
}
if (!region.isMetaTable() && shouldAssignRegionsWithFavoredNodes) {
List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
regions.add(region);
@ -1513,6 +1518,8 @@ public class AssignmentManager {
throw new IOException("Unable to determine a plan to assign region(s)"); throw new IOException("Unable to determine a plan to assign region(s)");
} }
processBogusAssignments(bulkPlan);
assign(regions.size(), servers.size(), assign(regions.size(), servers.size(),
"retainAssignment=true", bulkPlan); "retainAssignment=true", bulkPlan);
} }
@ -1542,6 +1549,8 @@ public class AssignmentManager {
throw new IOException("Unable to determine a plan to assign region(s)"); throw new IOException("Unable to determine a plan to assign region(s)");
} }
processBogusAssignments(bulkPlan);
processFavoredNodes(regions); processFavoredNodes(regions);
assign(regions.size(), servers.size(), "round-robin=true", bulkPlan); assign(regions.size(), servers.size(), "round-robin=true", bulkPlan);
} }
@ -2954,6 +2963,16 @@ public class AssignmentManager {
}
}
private void processBogusAssignments(Map<ServerName, List<HRegionInfo>> bulkPlan) {
if (bulkPlan.containsKey(LoadBalancer.BOGUS_SERVER_NAME)) {
// Found no plan for some regions, put those regions in RIT
for (HRegionInfo hri : bulkPlan.get(LoadBalancer.BOGUS_SERVER_NAME)) {
regionStates.updateRegionState(hri, State.FAILED_OPEN);
}
bulkPlan.remove(LoadBalancer.BOGUS_SERVER_NAME);
}
}
/**
* @return Instance of load balancer
*/


@ -18,6 +18,13 @@
*/
package org.apache.hadoop.hbase.master;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.protobuf.Descriptors;
import com.google.protobuf.Service;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
@ -165,11 +172,6 @@ import org.mortbay.jetty.Connector;
import org.mortbay.jetty.nio.SelectChannelConnector;
import org.mortbay.jetty.servlet.Context;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Maps;
import com.google.protobuf.Descriptors;
import com.google.protobuf.Service;
/**
* HMaster is the "master server" for HBase. An HBase cluster has one active
* master. If many masters are started, all compete. Whichever wins goes on to
@ -1396,11 +1398,14 @@ public class HMaster extends HRegionServer implements MasterServices {
final byte[] destServerName) throws HBaseIOException {
RegionState regionState = assignmentManager.getRegionStates().
getRegionState(Bytes.toString(encodedRegionName));
HRegionInfo hri;
if (regionState != null) {
hri = regionState.getRegion();
} else {
throw new UnknownRegionException(Bytes.toStringBinary(encodedRegionName));
}
ServerName dest;
if (destServerName == null || destServerName.length == 0) {
LOG.info("Passed destination servername is null/empty so " +
@ -1413,7 +1418,12 @@ public class HMaster extends HRegionServer implements MasterServices {
return;
}
} else {
ServerName candidate = ServerName.valueOf(Bytes.toString(destServerName));
dest = balancer.randomAssignment(hri, Lists.newArrayList(candidate));
if (dest == null) {
LOG.debug("Unable to determine a plan to assign " + hri);
return;
}
if (dest.equals(serverName) && balancer instanceof BaseLoadBalancer
&& !((BaseLoadBalancer)balancer).shouldBeOnMaster(hri)) {
// To avoid unnecessary region moving later by balancer. Don't put user
@ -1476,7 +1486,6 @@ public class HMaster extends HRegionServer implements MasterServices {
HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(hTableDescriptor, splitKeys);
checkInitialized();
sanityCheckTableDescriptor(hTableDescriptor);
if (cpHost != null) {
cpHost.preCreateTable(hTableDescriptor, newRegions);
}
@ -2827,4 +2836,9 @@ public class HMaster extends HRegionServer implements MasterServices {
public SplitOrMergeTracker getSplitOrMergeTracker() {
return splitOrMergeTracker;
}
@Override
public LoadBalancer getLoadBalancer() {
return balancer;
}
}


@ -48,6 +48,9 @@ import org.apache.hadoop.hbase.TableName;
@InterfaceAudience.Private
public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObserver {
//used to signal to the caller that the region(s) cannot be assigned
ServerName BOGUS_SERVER_NAME = ServerName.parseServerName("localhost,1,1");
/**
* Set the current cluster status. This allows a LoadBalancer to map host name to a server
* @param st
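BOGUS_SERVER_NAME lets a balancer hand back regions it cannot place, for example when a region's group has no live servers; processBogusAssignments() in AssignmentManager, shown earlier, strips this key from the bulk plan and marks those regions FAILED_OPEN. A minimal sketch of the producing side, under the assumption of a custom balancer assembling a bulk plan (the class and collection here are hypothetical):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.LoadBalancer;

public class BogusPlanSketch {
  // Parks regions that cannot be placed on LoadBalancer.BOGUS_SERVER_NAME so that
  // AssignmentManager.processBogusAssignments() can later mark them FAILED_OPEN.
  static Map<ServerName, List<HRegionInfo>> markUnplaceable(List<HRegionInfo> unplaceableRegions) {
    Map<ServerName, List<HRegionInfo>> plan = new HashMap<ServerName, List<HRegionInfo>>();
    plan.put(LoadBalancer.BOGUS_SERVER_NAME, new ArrayList<HRegionInfo>(unplaceableRegions));
    return plan;
  }
}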


@ -19,8 +19,11 @@
package org.apache.hadoop.hbase.master;
import com.google.common.net.HostAndPort;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import org.apache.commons.lang.ClassUtils;
import org.apache.commons.logging.Log;
@ -63,12 +66,15 @@ public class MasterCoprocessorHost
static class MasterEnvironment extends CoprocessorHost.Environment
implements MasterCoprocessorEnvironment {
private MasterServices masterServices;
final boolean supportGroupCPs;
public MasterEnvironment(final Class<?> implClass, final Coprocessor impl,
final int priority, final int seq, final Configuration conf,
final MasterServices services) {
super(impl, priority, seq, conf);
this.masterServices = services;
supportGroupCPs = !useLegacyMethod(impl.getClass(),
"preBalanceRSGroup", ObserverContext.class, String.class);
}
public MasterServices getMasterServices() {
@ -1170,4 +1176,135 @@ public class MasterCoprocessorHost
}
return bypass;
}
public void preMoveServers(final Set<HostAndPort> servers, final String targetGroup)
throws IOException {
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@Override
public void call(MasterObserver oserver,
ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
oserver.preMoveServers(ctx, servers, targetGroup);
}
}
});
}
public void postMoveServers(final Set<HostAndPort> servers, final String targetGroup)
throws IOException {
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@Override
public void call(MasterObserver oserver,
ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
oserver.postMoveServers(ctx, servers, targetGroup);
}
}
});
}
public void preMoveTables(final Set<TableName> tables, final String targetGroup)
throws IOException {
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@Override
public void call(MasterObserver oserver,
ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
oserver.preMoveTables(ctx, tables, targetGroup);
}
}
});
}
public void postMoveTables(final Set<TableName> tables, final String targetGroup)
throws IOException {
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@Override
public void call(MasterObserver oserver,
ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
oserver.postMoveTables(ctx, tables, targetGroup);
}
}
});
}
public void preAddRSGroup(final String name)
throws IOException {
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@Override
public void call(MasterObserver oserver,
ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
oserver.preAddRSGroup(ctx, name);
}
}
});
}
public void postAddRSGroup(final String name)
throws IOException {
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@Override
public void call(MasterObserver oserver,
ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) {
oserver.postAddRSGroup(ctx, name);
}
}
});
}
public void preRemoveRSGroup(final String name)
throws IOException {
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@Override
public void call(MasterObserver oserver,
ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
oserver.preRemoveRSGroup(ctx, name);
}
}
});
}
public void postRemoveRSGroup(final String name)
throws IOException {
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@Override
public void call(MasterObserver oserver,
ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
oserver.postRemoveRSGroup(ctx, name);
}
}
});
}
public void preBalanceRSGroup(final String name)
throws IOException {
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@Override
public void call(MasterObserver oserver,
ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
oserver.preBalanceRSGroup(ctx, name);
}
}
});
}
public void postBalanceRSGroup(final String name, final boolean balanceRan)
throws IOException {
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@Override
public void call(MasterObserver oserver,
ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
oserver.postBalanceRSGroup(ctx, name, balanceRan);
}
}
});
}
}


@ -1309,6 +1309,14 @@ public class MasterRpcServices extends RSRpcServices
}
Pair<HRegionInfo, ServerName> pair =
MetaTableAccessor.getRegion(master.getConnection(), regionName);
if (Bytes.equals(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(),regionName)) {
pair = new Pair<HRegionInfo, ServerName>(HRegionInfo.FIRST_META_REGIONINFO,
master.getMetaTableLocator().getMetaRegionLocation(master.getZooKeeper()));
}
if (pair == null) {
throw new UnknownRegionException(Bytes.toString(regionName));
}
if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName));
HRegionInfo hri = pair.getFirst();
if (master.cpHost != null) {


@ -328,4 +328,9 @@ public interface MasterServices extends Server {
* @throws IOException
*/
public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException;
/**
* @return load balancer
*/
public LoadBalancer getLoadBalancer();
}


@ -18,6 +18,8 @@
*/
package org.apache.hadoop.hbase.security.access;
import com.google.common.net.HostAndPort;
import java.io.IOException;
import java.net.InetAddress;
import java.security.PrivilegedExceptionAction;
@ -2619,4 +2621,34 @@ public class AccessController extends BaseMasterAndRegionObserver
public void postReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells) throws IOException {
}
@Override
public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<HostAndPort> servers, String targetGroup) throws IOException {
requirePermission("moveServers", Action.ADMIN);
}
@Override
public void preMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<TableName> tables, String targetGroup) throws IOException {
requirePermission("moveTables", Action.ADMIN);
}
@Override
public void preAddRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
String name) throws IOException {
requirePermission("addRSGroup", Action.ADMIN);
}
@Override
public void preRemoveRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
String name) throws IOException {
requirePermission("removeRSGroup", Action.ADMIN);
}
@Override
public void preBalanceRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
String groupName) throws IOException {
requirePermission("balanceRSGroup", Action.ADMIN);
}
}


@ -28,6 +28,7 @@ import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import org.apache.commons.logging.Log;
@ -74,6 +75,8 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import com.google.common.net.HostAndPort;
/**
* Tests invocation of the {@link org.apache.hadoop.hbase.coprocessor.MasterObserver}
* interface hooks at all appropriate times during normal HMaster operations.
@ -1312,6 +1315,56 @@ public class TestMasterObserver {
public void postSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
    final String namespace, final Quotas quotas) throws IOException {
}
@Override
public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<HostAndPort> servers, String targetGroup) throws IOException {
}
@Override
public void postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<HostAndPort> servers, String targetGroup) throws IOException {
}
@Override
public void preMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<TableName> tables, String targetGroup) throws IOException {
}
@Override
public void postMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
Set<TableName> tables, String targetGroup) throws IOException {
}
@Override
public void preAddRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
String name) throws IOException {
}
@Override
public void postAddRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
String name) throws IOException {
}
@Override
public void preRemoveRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
String name) throws IOException {
}
@Override
public void postRemoveRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
String name) throws IOException {
}
@Override
public void preBalanceRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
String groupName) throws IOException {
}
@Override
public void postBalanceRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
String groupName, boolean balancerRan) throws IOException {
}
}
private static HBaseTestingUtility UTIL = new HBaseTestingUtility();


@ -27,12 +27,16 @@ import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@ -536,7 +540,7 @@ public class TestAssignmentManagerOnCluster {
    desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
MetaTableAccessor.addRegionToMeta(meta, hri);
MyLoadBalancer.controledRegion = hri;
HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
master.assignRegion(hri);
@ -561,6 +565,105 @@ public class TestAssignmentManagerOnCluster {
}
}
/**
 * This tests that round-robin assignment fails when the balancer returns no bulk plan
*/
@Test (timeout=60000)
public void testRoundRobinAssignmentFailed() throws Exception {
TableName tableName = TableName.valueOf("testRoundRobinAssignmentFailed");
try {
HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor(FAMILY));
admin.createTable(desc);
Table meta = admin.getConnection().getTable(TableName.META_TABLE_NAME);
HRegionInfo hri = new HRegionInfo(
desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
MetaTableAccessor.addRegionToMeta(meta, hri);
MyLoadBalancer.controledRegion = hri;
HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
AssignmentManager am = master.getAssignmentManager();
// round-robin assignment but balancer cannot find a plan
// assignment should fail
am.assign(Arrays.asList(hri));
// if bulk assignment cannot update the region state to ONLINE
// or FAILED_OPEN, this waits until timeout
assertFalse(am.waitForAssignment(hri));
RegionState state = am.getRegionStates().getRegionState(hri);
assertEquals(RegionState.State.FAILED_OPEN, state.getState());
// Failed to open since no plan, so it's on no server
assertNull(state.getServerName());
// try again with valid plan
MyLoadBalancer.controledRegion = null;
am.assign(Arrays.asList(hri));
assertTrue(am.waitForAssignment(hri));
ServerName serverName = master.getAssignmentManager().
getRegionStates().getRegionServerOfRegion(hri);
TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
} finally {
MyLoadBalancer.controledRegion = null;
TEST_UTIL.deleteTable(tableName);
}
}
/**
 * This tests that retained assignment fails when the balancer returns no bulk plan
*/
@Test (timeout=60000)
public void testRetainAssignmentFailed() throws Exception {
TableName tableName = TableName.valueOf("testRetainAssignmentFailed");
try {
HTableDescriptor desc = new HTableDescriptor(tableName);
desc.addFamily(new HColumnDescriptor(FAMILY));
admin.createTable(desc);
Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
HRegionInfo hri = new HRegionInfo(
desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
MetaTableAccessor.addRegionToMeta(meta, hri);
MyLoadBalancer.controledRegion = hri;
HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
AssignmentManager am = master.getAssignmentManager();
Map<HRegionInfo, ServerName> regions = new HashMap<HRegionInfo, ServerName>();
ServerName dest = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName();
regions.put(hri, dest);
// retainAssignment but balancer cannot find a plan
// assignment should fail
am.assign(regions);
// if retained assignment cannot update the region state to ONLINE
// or FAILED_OPEN, this waits until timeout
assertFalse(am.waitForAssignment(hri));
RegionState state = am.getRegionStates().getRegionState(hri);
assertEquals(RegionState.State.FAILED_OPEN, state.getState());
// Failed to open since no plan, so it's on no server
assertNull(state.getServerName());
// try retainAssigment again with valid plan
MyLoadBalancer.controledRegion = null;
am.assign(regions);
assertTrue(am.waitForAssignment(hri));
ServerName serverName = master.getAssignmentManager().
getRegionStates().getRegionServerOfRegion(hri);
TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
// the region is retained on the same server as specified
assertEquals(serverName, dest);
} finally {
MyLoadBalancer.controledRegion = null;
TEST_UTIL.deleteTable(tableName);
}
}
/**
 * This tests region open failure which is not recoverable
 */
@ -1169,7 +1272,7 @@ public class TestAssignmentManagerOnCluster {
static class MyLoadBalancer extends StochasticLoadBalancer {
  // For this region, if specified, always assign to nowhere
  static volatile HRegionInfo controledRegion = null;
  static volatile Integer countRegionServers = null;
  static AtomicInteger counter = new AtomicInteger(0);
@ -1177,7 +1280,7 @@ public class TestAssignmentManagerOnCluster {
@Override
public ServerName randomAssignment(HRegionInfo regionInfo,
    List<ServerName> servers) {
  if (regionInfo.equals(controledRegion)) {
    return null;
  }
  return super.randomAssignment(regionInfo, servers);
@ -1195,8 +1298,26 @@ public class TestAssignmentManagerOnCluster {
    return null;
  }
}
if (regions.get(0).equals(controledRegion)) {
Map<ServerName, List<HRegionInfo>> m = Maps.newHashMap();
m.put(LoadBalancer.BOGUS_SERVER_NAME, regions);
return m;
}
  return super.roundRobinAssignment(regions, servers);
}
@Override
public Map<ServerName, List<HRegionInfo>> retainAssignment(
Map<HRegionInfo, ServerName> regions, List<ServerName> servers) {
for (HRegionInfo hri : regions.keySet()) {
if (hri.equals(controledRegion)) {
Map<ServerName, List<HRegionInfo>> m = Maps.newHashMap();
m.put(LoadBalancer.BOGUS_SERVER_NAME, Lists.newArrayList(regions.keySet()));
return m;
}
}
return super.retainAssignment(regions, servers);
}
}
public static class MyMaster extends HMaster {


@ -434,6 +434,9 @@ public class TestCatalogJanitor {
    final long nonce) throws IOException {
  return -1;
}
public LoadBalancer getLoadBalancer() {
return null;
}
@Override
public long truncateTable(


@ -135,7 +135,7 @@ public class TestMasterStatusServlet {
setupMockTables();
new MasterStatusTmpl()
    .setMetaLocation(ServerName.valueOf("metaserver,123,12345"))
    .render(new StringWriter(), master);
}
@ -144,16 +144,16 @@
setupMockTables();
List<ServerName> servers = Lists.newArrayList(
    ServerName.valueOf("rootserver,123,12345"),
    ServerName.valueOf("metaserver,123,12345"));
Set<ServerName> deadServers = new HashSet<ServerName>(
    Lists.newArrayList(
        ServerName.valueOf("badserver,123,12345"),
        ServerName.valueOf("uglyserver,123,12345"))
);
new MasterStatusTmpl()
    .setMetaLocation(ServerName.valueOf("metaserver,123,12345"))
    .setServers(servers)
    .setDeadServers(deadServers)
    .render(new StringWriter(), master);


@ -261,7 +261,7 @@ public class TestSimpleRegionNormalizer {
masterServices = Mockito.mock(MasterServices.class, RETURNS_DEEP_STUBS);
// for simplicity all regions are assumed to be on one server; doesn't matter to us
ServerName sn = ServerName.valueOf("localhost", 0, 1L);
when(masterServices.getAssignmentManager().getRegionStates().
    getRegionsOfTable(any(TableName.class))).thenReturn(hris);
when(masterServices.getAssignmentManager().getRegionStates().


@ -2738,4 +2738,79 @@ public class TestAccessController extends SecureTestUtil {
verifyDenied(replicateLogEntriesAction, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER,
    USER_GROUP_READ, USER_GROUP_ADMIN, USER_GROUP_CREATE);
}
@Test
public void testMoveServers() throws Exception {
AccessTestAction action1 = new AccessTestAction() {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.preMoveServers(ObserverContext.createAndPrepare(CP_ENV, null),
null, null);
return null;
}
};
verifyAllowed(action1, SUPERUSER, USER_ADMIN);
verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
}
@Test
public void testMoveTables() throws Exception {
AccessTestAction action1 = new AccessTestAction() {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.preMoveTables(ObserverContext.createAndPrepare(CP_ENV, null),
null, null);
return null;
}
};
verifyAllowed(action1, SUPERUSER, USER_ADMIN);
verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
}
@Test
public void testAddGroup() throws Exception {
AccessTestAction action1 = new AccessTestAction() {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.preAddRSGroup(ObserverContext.createAndPrepare(CP_ENV, null),
null);
return null;
}
};
verifyAllowed(action1, SUPERUSER, USER_ADMIN);
verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
}
@Test
public void testRemoveGroup() throws Exception {
AccessTestAction action1 = new AccessTestAction() {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.preRemoveRSGroup(ObserverContext.createAndPrepare(CP_ENV, null),
null);
return null;
}
};
verifyAllowed(action1, SUPERUSER, USER_ADMIN);
verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
}
@Test
public void testBalanceGroup() throws Exception {
AccessTestAction action1 = new AccessTestAction() {
@Override
public Object run() throws Exception {
ACCESS_CONTROLLER.preBalanceRSGroup(ObserverContext.createAndPrepare(CP_ENV, null),
null);
return null;
}
};
verifyAllowed(action1, SUPERUSER, USER_ADMIN);
verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
}
}


@ -253,6 +253,41 @@
  </dependency>
</dependencies>
<profiles>
<profile>
<id>rsgroup</id>
<activation>
<property>
<name>!skip-rsgroup</name>
</property>
</activation>
<dependencies>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-rsgroup</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>build-helper-maven-plugin</artifactId>
<executions>
<execution>
<id>add-test-source</id>
<goals>
<goal>add-test-source</goal>
</goals>
<configuration>
<sources>
<source>src/test/rsgroup</source>
</sources>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
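
[Editor's note] The profile above is active by default: its activation condition is the absence of a skip-rsgroup property, so the rsgroup dependency and test sources can be left out of a build by defining that property, e.g.:

  mvn clean install -Dskip-rsgroup

The same property also controls the hbase-rsgroup module profile added to the root pom later in this patch.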
<!-- Skip the tests in this module -->
<profile>
  <id>skipShellTests</id>


@ -102,5 +102,6 @@ require 'hbase/quotas'
require 'hbase/replication_admin'
require 'hbase/security'
require 'hbase/visibility_labels'
require 'hbase/rsgroup_admin'
include HBaseQuotasConstants


@ -47,6 +47,10 @@ module Hbase
  ::Hbase::Admin.new(@connection.getAdmin, formatter)
end
def rsgroup_admin(formatter)
::Hbase::RSGroupAdmin.new(@connection, formatter)
end
# Create new one each time
def table(table, shell)
  ::Hbase::Table.new(@connection.getTable(table), shell)


@ -0,0 +1,150 @@
#
# Copyright The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
include Java
java_import org.apache.hadoop.hbase.util.Pair
# Wrapper for org.apache.hadoop.hbase.rsgroup.RSGroupAdmin,
# an API to manage region server groups
module Hbase
class RSGroupAdmin
include HBaseConstants
def initialize(connection, formatter)
@admin = org.apache.hadoop.hbase.rsgroup.RSGroupAdmin.newClient(connection)
@formatter = formatter
end
def close
@admin.close
end
#--------------------------------------------------------------------------
# Returns a list of groups in hbase
def list_rs_groups
@admin.listRSGroups.map { |g| g.getName }
end
#--------------------------------------------------------------------------
# get a group's information
def get_rsgroup(group_name)
group = @admin.getRSGroupInfo(group_name)
if group.nil?
raise(ArgumentError, 'Group does not exist: ' + group_name)
end
res = {}
if block_given?
yield('Servers:')
end
servers = []
group.getServers.each do |v|
if block_given?
yield(v.toString)
else
servers << v.toString
end
end
res[:servers] = servers
tables = []
if block_given?
yield('Tables:')
end
group.getTables.each do |v|
if block_given?
yield(v.toString)
else
tables << v.toString
end
end
res[:tables] = tables
if !block_given?
res
else
nil
end
end
#--------------------------------------------------------------------------
# add a group
def add_rs_group(group_name)
@admin.addRSGroup(group_name)
end
#--------------------------------------------------------------------------
# remove a group
def remove_rs_group(group_name)
@admin.removeRSGroup(group_name)
end
#--------------------------------------------------------------------------
# balance a group
def balance_rs_group(group_name)
@admin.balanceRSGroup(group_name)
end
#--------------------------------------------------------------------------
# move server to a group
def move_servers(dest, *args)
servers = java.util.HashSet.new
args[0].each do |s|
servers.add(com.google.common.net.HostAndPort.fromString(s))
end
@admin.moveServers(servers, dest)
end
#--------------------------------------------------------------------------
# move tables to a group
def move_tables(dest, *args)
  tables = java.util.HashSet.new
args[0].each do |s|
tables.add(org.apache.hadoop.hbase.TableName.valueOf(s))
end
@admin.moveTables(tables, dest)
end
#--------------------------------------------------------------------------
# get group of server
def get_rsgroup_of_server(server)
res = @admin.getRSGroupOfServer(
com.google.common.net.HostAndPort.fromString(server))
if res.nil?
raise(ArgumentError,'Server has no group: ' + server)
end
res
end
#--------------------------------------------------------------------------
# get group of table
def get_rsgroup_of_table(table)
res = @admin.getRSGroupInfoOfTable(
org.apache.hadoop.hbase.TableName.valueOf(table))
if res.nil?
raise(ArgumentError,'Table has no group: ' + table)
end
res
end
end
end
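
[Editor's note] The Ruby wrapper above is a thin shim over the Java client API. A minimal Java sketch of the same workflow, assuming only the methods the wrapper delegates to; the connection setup, host:port, and group/table names are illustrative:

import java.util.Collections;
import com.google.common.net.HostAndPort;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdmin;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;

public class RSGroupAdminExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      RSGroupAdmin admin = RSGroupAdmin.newClient(connection);
      admin.addRSGroup("my_group");
      // Move one region server and one table into the new group.
      admin.moveServers(
          Collections.singleton(HostAndPort.fromString("host1:16020")), "my_group");
      admin.moveTables(
          Collections.singleton(TableName.valueOf("test_table")), "my_group");
      RSGroupInfo info = admin.getRSGroupInfo("my_group");
      System.out.println(info.getName() + " servers=" + info.getServers()
          + " tables=" + info.getTables());
      admin.balanceRSGroup("my_group");
      admin.close();
    }
  }
}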


@ -107,6 +107,10 @@ module Shell
  @hbase_quotas_admin ||= hbase.quotas_admin(formatter)
end
def hbase_rsgroup_admin
@rsgroup_admin ||= hbase.rsgroup_admin(formatter)
end
def export_commands(where)
  ::Shell.commands.keys.each do |cmd|
    # here where is the IRB namespace
@ -429,3 +433,20 @@ Shell.load_command_group(
    set_visibility
  ]
)
Shell.load_command_group(
'rsgroup',
:full_name => 'RSGroups',
:comment => "NOTE: These commands are only applicable when running with the rsgroup setup",
:commands => %w[
list_rsgroups
get_rsgroup
add_rsgroup
remove_rsgroup
balance_rsgroup
move_rsgroup_servers
move_rsgroup_tables
get_server_rsgroup
get_table_rsgroup
]
)
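
[Editor's note] Taken together, the new commands cover the full lifecycle from the shell; the group, server, and table names below are illustrative:

  hbase> add_rsgroup 'my_group'
  hbase> move_rsgroup_servers 'my_group', ['server1:16020']
  hbase> move_rsgroup_tables 'my_group', ['test_table']
  hbase> get_rsgroup 'my_group'
  hbase> balance_rsgroup 'my_group'
  hbase> get_table_rsgroup 'test_table'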


@ -74,6 +74,10 @@ module Shell
  @shell.hbase_quotas_admin
end
def rsgroup_admin
@shell.hbase_rsgroup_admin
end
#----------------------------------------------------------------------
def formatter


@ -0,0 +1,39 @@
#
# Copyright The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Shell
module Commands
class AddRsgroup < Command
def help
return <<-EOF
Create a new region server group.
Example:
hbase> add_rsgroup 'my_group'
EOF
end
def command(group_name)
rsgroup_admin.add_rs_group(group_name)
end
end
end
end


@ -0,0 +1,37 @@
#
# Copyright The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Shell
module Commands
class BalanceRsgroup < Command
def help
return <<-EOF
Balance a region server group
hbase> balance_rsgroup 'my_group'
EOF
end
def command(group_name)
rsgroup_admin.balance_rs_group(group_name)
end
end
end
end


@ -0,0 +1,44 @@
#
# Copyright The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Shell
module Commands
class GetRsgroup < Command
def help
return <<-EOF
Get a region server group's information.
Example:
hbase> get_rsgroup 'default'
EOF
end
def command(group_name)
now = Time.now
formatter.header(['GROUP INFORMATION'])
rsgroup_admin.get_rsgroup(group_name) do |s|
formatter.row([s])
end
formatter.footer(now)
end
end
end
end


@ -0,0 +1,40 @@
#
# Copyright The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Shell
module Commands
class GetServerRsgroup < Command
def help
return <<-EOF
Get the group name the given region server is a member of.
hbase> get_server_rsgroup 'server1:port1'
EOF
end
def command(server)
now = Time.now
group_name = rsgroup_admin.get_rsgroup_of_server(server).getName
formatter.row([group_name])
formatter.footer(now, 1)
end
end
end
end


@ -0,0 +1,41 @@
#
# Copyright The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Shell
module Commands
class GetTableRsgroup < Command
def help
return <<-EOF
Get the group name the given table is a member of.
hbase> get_table_rsgroup 'myTable'
EOF
end
def command(table)
now = Time.now
group_name =
rsgroup_admin.get_rsgroup_of_table(table).getName
formatter.row([group_name])
formatter.footer(now, 1)
end
end
end
end


@ -0,0 +1,50 @@
#
# Copyright The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Shell
module Commands
class ListRsgroups < Command
def help
return <<-EOF
List all region server groups. An optional regular expression parameter can
be used to filter the output.
Example:
hbase> list_rsgroups
hbase> list_rsgroups 'abc.*'
EOF
end
def command(regex = '.*')
now = Time.now
formatter.header(['GROUPS'])
regex = /#{regex}/ unless regex.is_a?(Regexp)
list = rsgroup_admin.list_rs_groups.grep(regex)
list.each do |group|
formatter.row([group])
end
formatter.footer(now, list.size)
end
end
end
end


@ -0,0 +1,37 @@
#
# Copyright The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Shell
module Commands
class MoveRsgroupServers < Command
def help
return <<-EOF
Reassign region servers from one group to another.
hbase> move_rsgroup_servers 'dest',['server1:port','server2:port']
EOF
end
def command(dest, servers)
rsgroup_admin.move_servers(dest, servers)
end
end
end
end


@ -0,0 +1,37 @@
#
# Copyright The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Shell
module Commands
class MoveRsgroupTables < Command
def help
return <<-EOF
Reassign tables from one group to another.
hbase> move_rsgroup_tables 'dest',['table1','table2']
EOF
end
def command(dest, tables)
rsgroup_admin.move_tables(dest, tables)
end
end
end
end


@ -0,0 +1,37 @@
#
# Copyright The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
module Shell
module Commands
class RemoveRsgroup < Command
def help
return <<-EOF
Remove a group.
hbase> remove_rsgroup 'my_group'
EOF
end
def command(group_name)
rsgroup_admin.remove_rs_group(group_name)
end
end
end
end


@ -31,7 +31,7 @@ public class TestShell extends AbstractTestShell {
@Test
public void testRunShellTests() throws IOException {
  System.setProperty("shell.test.exclude",
      "replication_admin_test.rb,rsgroup_shell_test.rb");
  // Start all ruby tests
  jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb");
}


@ -0,0 +1,111 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client.rsgroup;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint;
import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
import org.apache.hadoop.hbase.security.access.SecureTestUtil;
import org.apache.hadoop.hbase.security.visibility.VisibilityTestUtil;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.jruby.embed.PathType;
import org.jruby.embed.ScriptingContainer;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
// Separate shell test class for rsgroups, since we need to use a different
// balancer and run more than one region server
@Category({ClientTests.class, LargeTests.class})
public class TestShellRSGroups {
final Log LOG = LogFactory.getLog(getClass());
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final static ScriptingContainer jruby = new ScriptingContainer();
private static String basePath;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
basePath = System.getProperty("basedir");
// Start mini cluster
TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
TEST_UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false);
TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
TEST_UTIL.getConfiguration().setInt(HConstants.MASTER_INFO_PORT, -1);
TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_INFO_PORT, -1);
// Security setup configuration
SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());
VisibilityTestUtil.enableVisiblityLabels(TEST_UTIL.getConfiguration());
//Setup RegionServer Groups
TEST_UTIL.getConfiguration().set(
HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
RSGroupBasedLoadBalancer.class.getName());
TEST_UTIL.getConfiguration().set(
CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
RSGroupAdminEndpoint.class.getName());
TEST_UTIL.getConfiguration().setBoolean(
HConstants.ZOOKEEPER_USEMULTI,
true);
TEST_UTIL.startMiniCluster(1,4);
// Configure jruby runtime
List<String> loadPaths = new ArrayList<String>();
loadPaths.add(basePath+"/src/main/ruby");
loadPaths.add(basePath+"/src/test/ruby");
jruby.getProvider().setLoadPaths(loadPaths);
jruby.put("$TEST_CLUSTER", TEST_UTIL);
System.setProperty("jruby.jit.logging.verbose", "true");
System.setProperty("jruby.jit.logging", "true");
System.setProperty("jruby.native.verbose", "true");
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@Test
public void testRunShellTests() throws IOException {
try {
// Start only GroupShellTest
System.setProperty("shell.test", "Hbase::RSGroupShellTest");
jruby.runScriptlet(PathType.ABSOLUTE,
basePath + "/src/test/ruby/tests_runner.rb");
} finally {
System.clearProperty("shell.test");
}
}
}
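
[Editor's note] The two configuration keys set in setUpBeforeClass are also what enables rsgroups on a real cluster. Assuming the standard property names behind HConstants.HBASE_MASTER_LOADBALANCER_CLASS and CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, the hbase-site.xml equivalent would be:

  <property>
    <name>hbase.master.loadbalancer.class</name>
    <value>org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer</value>
  </property>
  <property>
    <name>hbase.coprocessor.master.classes</name>
    <value>org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint</value>
  </property>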


@ -0,0 +1,96 @@
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
require 'hbase'
require 'shell'
require 'shell/formatter'
module Hbase
class RSGroupShellTest < Test::Unit::TestCase
def setup
@formatter = ::Shell::Formatter::Console.new
@hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration)
@shell = Shell::Shell.new(@hbase, @formatter)
connection = $TEST_CLUSTER.getConnection
@rsgroup_admin =
org.apache.hadoop.hbase.rsgroup.RSGroupAdmin.newClient(connection)
end
define_test 'Test Basic RSGroup Commands' do
group_name = 'test_group'
table_name = 'test_table'
@shell.command('create', table_name, 'f')
@shell.command('add_rsgroup', group_name)
assert_not_nil(@rsgroup_admin.getRSGroupInfo(group_name))
@shell.command('remove_rsgroup', group_name)
assert_nil(@rsgroup_admin.getRSGroupInfo(group_name))
@shell.command('add_rsgroup', group_name)
group = @rsgroup_admin.getRSGroupInfo(group_name)
assert_not_nil(group)
assert_equal(0, group.getServers.count)
hostport =
@rsgroup_admin.getRSGroupInfo('default').getServers.iterator.next.toString
@shell.command('move_rsgroup_servers',
group_name,
[hostport])
assert_equal(1, @rsgroup_admin.getRSGroupInfo(group_name).getServers.count)
@shell.command('move_rsgroup_tables',
group_name,
[table_name])
assert_equal(1, @rsgroup_admin.getRSGroupInfo(group_name).getTables.count)
count = 0
@hbase.rsgroup_admin(@formatter).get_rsgroup(group_name) do |line|
case count
when 1
assert_equal(hostport, line)
when 3
assert_equal(table_name, line)
end
count += 1
end
assert_equal(4, count)
assert_equal(2,
@hbase.rsgroup_admin(@formatter).list_rs_groups.count)
# just run it to verify jruby->java api binding
@hbase.rsgroup_admin(@formatter).balance_rs_group(group_name)
end
# we test exceptions that could be thrown by the ruby wrappers
define_test 'Test bogus arguments' do
assert_raise(ArgumentError) do
@hbase.rsgroup_admin(@formatter).get_rsgroup('foobar')
end
assert_raise(ArgumentError) do
@hbase.rsgroup_admin(@formatter).get_rsgroup_of_server('foobar:123')
end
assert_raise(ArgumentError) do
@hbase.rsgroup_admin(@formatter).get_rsgroup_of_table('foobar')
end
end
end
end


@ -72,6 +72,10 @@ module Hbase
  @shell.hbase_replication_admin
end
def rsgroup_admin(_formatter)
  @shell.hbase_rsgroup_admin
end
def create_test_table(name)
  # Create the table if needed
  unless admin.exists?(name)

pom.xml

@ -1352,6 +1352,18 @@
  <type>test-jar</type>
  <scope>test</scope>
</dependency>
<dependency>
<artifactId>hbase-rsgroup</artifactId>
<groupId>org.apache.hbase</groupId>
<version>${project.version}</version>
</dependency>
<dependency>
<artifactId>hbase-rsgroup</artifactId>
<groupId>org.apache.hbase</groupId>
<version>${project.version}</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
  <artifactId>hbase-server</artifactId>
  <groupId>org.apache.hbase</groupId>
@ -1799,6 +1811,17 @@
-->
<profiles>
<profile>
<id>rsgroup</id>
<activation>
<property>
<name>!skip-rsgroup</name>
</property>
</activation>
<modules>
<module>hbase-rsgroup</module>
</modules>
</profile>
<profile>
  <id>build-with-jdk8</id>
  <activation>