From 64328caef0bb712bb69d0241b4b8b3474a82702c Mon Sep 17 00:00:00 2001 From: Andrew Purtell Date: Mon, 23 Oct 2017 14:15:06 -0700 Subject: [PATCH] HBASE-15631 Backport Regionserver Groups (HBASE-6721) to branch-1 (Francis Liu and Andrew Purtell) --- .../org/apache/hadoop/hbase/ServerName.java | 25 +- .../org/apache/hadoop/hbase/net/Address.java | 89 + hbase-it/pom.xml | 41 + .../hbase/rsgroup/IntegrationTestRSGroup.java | 99 + hbase-protocol/pom.xml | 2 + .../protobuf/generated/ClientProtos.java | 16 +- .../generated/RSGroupAdminProtos.java | 13571 ++++++++++++++++ .../protobuf/generated/RSGroupProtos.java | 1332 ++ .../src/main/protobuf/RSGroup.proto | 35 + .../src/main/protobuf/RSGroupAdmin.proto | 149 + hbase-rsgroup/pom.xml | 278 + .../hadoop/hbase/rsgroup/RSGroupAdmin.java | 92 + .../hbase/rsgroup/RSGroupAdminClient.java | 212 + .../hbase/rsgroup/RSGroupAdminEndpoint.java | 1049 ++ .../hbase/rsgroup/RSGroupAdminServer.java | 516 + .../rsgroup/RSGroupBasedLoadBalancer.java | 431 + .../hadoop/hbase/rsgroup/RSGroupInfo.java | 190 + .../hbase/rsgroup/RSGroupInfoManager.java | 116 + .../hbase/rsgroup/RSGroupInfoManagerImpl.java | 795 + .../hbase/rsgroup/RSGroupProtobufUtil.java | 61 + .../hadoop/hbase/rsgroup/RSGroupSerDe.java | 88 + .../hbase/rsgroup/RSGroupableBalancer.java | 32 + .../TestRSGroupBasedLoadBalancer.java | 573 + .../hadoop/hbase/rsgroup/TestRSGroups.java | 300 + .../hbase/rsgroup/TestRSGroupsBase.java | 815 + .../rsgroup/TestRSGroupsOfflineMode.java | 187 + .../rsgroup/VerifyingRSGroupAdminClient.java | 155 + .../hbase/tmpl/master/MasterStatusTmpl.jamon | 2 + .../hadoop/hbase/LocalHBaseCluster.java | 3 + .../BaseMasterAndRegionObserver.java | 62 + .../hbase/coprocessor/BaseMasterObserver.java | 63 + .../hbase/coprocessor/MasterObserver.java | 113 + .../hbase/master/AssignmentManager.java | 16 +- .../apache/hadoop/hbase/master/HMaster.java | 5 + .../hadoop/hbase/master/LoadBalancer.java | 3 + .../hbase/master/MasterCoprocessorHost.java | 160 + .../hadoop/hbase/master/MasterServices.java | 5 + .../security/access/AccessController.java | 37 + .../hbase/coprocessor/TestMasterObserver.java | 61 + .../hbase/master/MockNoopMasterServices.java | 5 + .../TestAssignmentManagerOnCluster.java | 127 +- .../hbase/master/TestCatalogJanitor.java | 3 + .../hbase/master/TestMasterStatusServlet.java | 12 +- .../TestSimpleRegionNormalizer.java | 2 +- .../security/access/TestAccessController.java | 75 + hbase-shell/pom.xml | 35 + hbase-shell/src/main/ruby/hbase.rb | 1 + hbase-shell/src/main/ruby/hbase/hbase.rb | 4 + .../src/main/ruby/hbase/rsgroup_admin.rb | 164 + hbase-shell/src/main/ruby/shell.rb | 22 + hbase-shell/src/main/ruby/shell/commands.rb | 4 + .../main/ruby/shell/commands/add_rsgroup.rb | 39 + .../ruby/shell/commands/balance_rsgroup.rb | 37 + .../main/ruby/shell/commands/get_rsgroup.rb | 43 + .../ruby/shell/commands/get_server_rsgroup.rb | 39 + .../ruby/shell/commands/get_table_rsgroup.rb | 40 + .../main/ruby/shell/commands/list_rsgroups.rb | 49 + .../shell/commands/move_servers_rsgroup.rb | 37 + .../commands/move_servers_tables_rsgroup.rb | 37 + .../shell/commands/move_tables_rsgroup.rb | 37 + .../ruby/shell/commands/remove_rsgroup.rb | 37 + .../apache/hadoop/hbase/client/TestShell.java | 2 +- .../client/rsgroup/TestShellRSGroups.java | 111 + .../src/test/ruby/shell/rsgroup_shell_test.rb | 96 + hbase-shell/src/test/ruby/test_helper.rb | 4 + pom.xml | 23 + 66 files changed, 22843 insertions(+), 21 deletions(-) create mode 100644 
hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java create mode 100644 hbase-it/src/test/rsgroup/org/apache/hadoop/hbase/rsgroup/IntegrationTestRSGroup.java create mode 100644 hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java create mode 100644 hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java create mode 100644 hbase-protocol/src/main/protobuf/RSGroup.proto create mode 100644 hbase-protocol/src/main/protobuf/RSGroupAdmin.proto create mode 100644 hbase-rsgroup/pom.xml create mode 100644 hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java create mode 100644 hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java create mode 100644 hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java create mode 100644 hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java create mode 100644 hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java create mode 100644 hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java create mode 100644 hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java create mode 100644 hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java create mode 100644 hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupProtobufUtil.java create mode 100644 hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupSerDe.java create mode 100644 hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java create mode 100644 hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java create mode 100644 hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java create mode 100644 hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java create mode 100644 hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java create mode 100644 hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java create mode 100644 hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb create mode 100644 hbase-shell/src/main/ruby/shell/commands/add_rsgroup.rb create mode 100644 hbase-shell/src/main/ruby/shell/commands/balance_rsgroup.rb create mode 100644 hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb create mode 100644 hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb create mode 100644 hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb create mode 100644 hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb create mode 100644 hbase-shell/src/main/ruby/shell/commands/move_servers_rsgroup.rb create mode 100644 hbase-shell/src/main/ruby/shell/commands/move_servers_tables_rsgroup.rb create mode 100644 hbase-shell/src/main/ruby/shell/commands/move_tables_rsgroup.rb create mode 100644 hbase-shell/src/main/ruby/shell/commands/remove_rsgroup.rb create mode 100644 hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java create mode 100644 hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java index c90e7e1f56f..339f58806a1 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java @@ -24,6 +24,7 @@ import com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.util.Addressing; @@ -106,7 +107,7 @@ public class ServerName implements Comparable, Serializable { this.hostnameOnly = hostname; this.port = port; this.startcode = startcode; - this.servername = getServerName(this.hostnameOnly, port, startcode); + this.servername = getServerName(hostname, port, startcode); } /** @@ -402,4 +403,26 @@ public class ServerName implements Comparable, Serializable { int port = Addressing.parsePort(str); return valueOf(hostname, port, -1L); } + + /** + * @return an Address constructed from the hostname and port carried by this ServerName + */ + public Address getAddress() { + return Address.fromParts(getHostname(), getPort()); + } + + /** + * @param left + * @param right + * @return True if other has same hostname and port. + */ + public static boolean isSameAddress(final ServerName left, + final ServerName right) { + // TODO: Make this left.getAddress().equals(right.getAddress()) + if (left == null) return false; + if (right == null) return false; + return left.getHostname().compareToIgnoreCase(right.getHostname()) == 0 && + left.getPort() == right.getPort(); + } + } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java new file mode 100644 index 00000000000..15b496029b8 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/net/Address.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.net; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +import com.google.common.net.HostAndPort; + +/** + * An immutable type to hold a hostname and port combo, like an Endpoint + * or java.net.InetSocketAddress (but without danger of our calling + * resolve -- we do NOT want a resolve happening every time we want + * to hold a hostname and port combo). This class is also <>. + *

<p>In implementation this class is a facade over Guava's {@link HostAndPort}.
+ * We cannot have Guava classes in our API hence this Type.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class Address implements Comparable<Address>
{ + private HostAndPort hostAndPort; + + private Address(HostAndPort hostAndPort) { + this.hostAndPort = hostAndPort; + } + + public static Address fromParts(String hostname, int port) { + return new Address(HostAndPort.fromParts(hostname, port)); + } + + public static Address fromString(String hostnameAndPort) { + return new Address(HostAndPort.fromString(hostnameAndPort)); + } + + public String getHostname() { + return this.hostAndPort.getHostText(); + } + + public int getPort() { + return this.hostAndPort.getPort(); + } + + @Override + public String toString() { + return this.hostAndPort.toString(); + } + + @Override + // Don't use HostAndPort equals... It is wonky including + // ipv6 brackets + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other instanceof Address) { + Address that = (Address)other; + return this.getHostname().equals(that.getHostname()) && + this.getPort() == that.getPort(); + } + return false; + } + + @Override + public int hashCode() { + return this.getHostname().hashCode() ^ getPort(); + } + + @Override + public int compareTo(Address that) { + int compare = this.getHostname().compareTo(that.getHostname()); + if (compare != 0) return compare; + return this.getPort() - that.getPort(); + } +} diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml index 715bcac9b59..8839a9c8b6d 100644 --- a/hbase-it/pom.xml +++ b/hbase-it/pom.xml @@ -265,6 +265,47 @@ + + rsgroup + + + !skip-rsgroup + + + + + org.apache.hbase + hbase-rsgroup + + + org.apache.hbase + hbase-rsgroup + test-jar + test + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + add-test-source + + add-test-source + + + + src/test/rsgroup + + + + + + + + skipIntegrationTests diff --git a/hbase-it/src/test/rsgroup/org/apache/hadoop/hbase/rsgroup/IntegrationTestRSGroup.java b/hbase-it/src/test/rsgroup/org/apache/hadoop/hbase/rsgroup/IntegrationTestRSGroup.java new file mode 100644 index 00000000000..e5bb9951856 --- /dev/null +++ b/hbase-it/src/test/rsgroup/org/apache/hadoop/hbase/rsgroup/IntegrationTestRSGroup.java @@ -0,0 +1,99 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rsgroup; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.IntegrationTestingUtility; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.testclassification.IntegrationTests; +import org.junit.After; +import org.junit.Before; +import org.junit.experimental.categories.Category; + +/** + * Runs all of the units tests defined in TestGroupBase + * as an integration test. + * Requires TestRSGroupBase.NUM_SLAVE_BASE servers to run. 
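+ * <p>For orientation, a minimal sketch (an illustration only, not part of this
+ * patch) of the admin API these tests drive. The method and class names are
+ * taken from RSGroupAdmin/RSGroupAdminClient added by this patch; the exact
+ * moveServers argument type (a set of Address) is assumed from the Address
+ * class introduced above:
+ * <pre>
+ *   RSGroupAdmin rsAdmin = new RSGroupAdminClient(connection);
+ *   rsAdmin.addRSGroup("appGroup");
+ *   rsAdmin.moveServers(
+ *     Sets.newHashSet(Address.fromString("rs1.example.com:16020")), "appGroup");
+ *   rsAdmin.moveTables(Sets.newHashSet(TableName.valueOf("t1")), "appGroup");
+ *   rsAdmin.balanceRSGroup("appGroup");
+ *   RSGroupInfo info = rsAdmin.getRSGroupInfo("appGroup");
+ * </pre>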
+ */ +@Category(IntegrationTests.class) +public class IntegrationTestRSGroup extends TestRSGroupsBase { + //Integration specific + private final static Log LOG = LogFactory.getLog(IntegrationTestRSGroup.class); + private static boolean initialized = false; + + @Before + public void beforeMethod() throws Exception { + if(!initialized) { + LOG.info("Setting up IntegrationTestGroup"); + LOG.info("Initializing cluster with " + NUM_SLAVES_BASE + " servers"); + TEST_UTIL = new IntegrationTestingUtility(); + ((IntegrationTestingUtility)TEST_UTIL).initializeCluster(NUM_SLAVES_BASE); + //set shared configs + admin = TEST_UTIL.getHBaseAdmin(); + cluster = TEST_UTIL.getHBaseClusterInterface(); + rsGroupAdmin = new VerifyingRSGroupAdminClient(new RSGroupAdminClient(TEST_UTIL.getConnection()), + TEST_UTIL.getConfiguration()); + LOG.info("Done initializing cluster"); + initialized = true; + //cluster may not be clean + //cleanup when initializing + afterMethod(); + } + } + + @After + public void afterMethod() throws Exception { + LOG.info("Cleaning up previous test run"); + //cleanup previous artifacts + deleteTableIfNecessary(); + deleteNamespaceIfNecessary(); + deleteGroups(); + admin.setBalancerRunning(true, true); + + LOG.info("Restoring the cluster"); + ((IntegrationTestingUtility)TEST_UTIL).restoreCluster(); + LOG.info("Done restoring the cluster"); + + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + LOG.info("Waiting for cleanup to finish "+ rsGroupAdmin.listRSGroups()); + //Might be greater since moving servers back to default + //is after starting a server + return rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size() + >= NUM_SLAVES_BASE; + } + }); + + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + LOG.info("Waiting for regionservers to be registered "+ rsGroupAdmin.listRSGroups()); + //Might be greater since moving servers back to default + //is after starting a server + return rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size() + == getNumServers(); + } + }); + + LOG.info("Done cleaning up previous test run"); + } +} diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml index 8588a8c28bf..a3b9be1bf23 100644 --- a/hbase-protocol/pom.xml +++ b/hbase-protocol/pom.xml @@ -195,6 +195,8 @@ RegionNormalizer.proto RegionServerStatus.proto RowProcessor.proto + RSGroup.proto + RSGroupAdmin.proto SecureBulkLoad.proto Snapshot.proto Table.proto diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java index 06ebc652ae6..35fddc281c0 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java @@ -1986,7 +1986,7 @@ public final class ClientProtos { * optional bool load_column_families_on_demand = 14; * *
-     * DO NOT add defaults to load_column_families_on_demand.
+     * DO NOT add defaults to load_column_families_on_demand. 
      * </pre>
      */
     boolean hasLoadColumnFamiliesOnDemand();
@@ -1994,7 +1994,7 @@ public final class ClientProtos {
      * <code>optional bool load_column_families_on_demand = 14;</code>
      *
      * <pre>
-     * DO NOT add defaults to load_column_families_on_demand.
+     * DO NOT add defaults to load_column_families_on_demand. 
      * </pre>
      */
     boolean getLoadColumnFamiliesOnDemand();
@@ -2515,7 +2515,7 @@ public final class ClientProtos {
      * <code>optional bool load_column_families_on_demand = 14;</code>
      *
      * <pre>
-     * DO NOT add defaults to load_column_families_on_demand.
+     * DO NOT add defaults to load_column_families_on_demand. 
      * </pre>
      */
     public boolean hasLoadColumnFamiliesOnDemand() {
@@ -2525,7 +2525,7 @@ public final class ClientProtos {
      * <code>optional bool load_column_families_on_demand = 14;</code>
      *
      * <pre>
-     * DO NOT add defaults to load_column_families_on_demand.
+     * DO NOT add defaults to load_column_families_on_demand. 
      * </pre>
      */
     public boolean getLoadColumnFamiliesOnDemand() {
@@ -4577,7 +4577,7 @@ public final class ClientProtos {
        * <code>optional bool load_column_families_on_demand = 14;</code>
        *
        * <pre>
-       * DO NOT add defaults to load_column_families_on_demand.
+       * DO NOT add defaults to load_column_families_on_demand. 
        * </pre>
        */
       public boolean hasLoadColumnFamiliesOnDemand() {
@@ -4587,7 +4587,7 @@ public final class ClientProtos {
        * <code>optional bool load_column_families_on_demand = 14;</code>
        *
        * <pre>
-       * DO NOT add defaults to load_column_families_on_demand.
+       * DO NOT add defaults to load_column_families_on_demand. 
        * </pre>
        */
       public boolean getLoadColumnFamiliesOnDemand() {
@@ -4597,7 +4597,7 @@ public final class ClientProtos {
        * <code>optional bool load_column_families_on_demand = 14;</code>
        *
        * <pre>
-       * DO NOT add defaults to load_column_families_on_demand.
+       * DO NOT add defaults to load_column_families_on_demand. 
        * </pre>
        */
       public Builder setLoadColumnFamiliesOnDemand(boolean value) {
@@ -4610,7 +4610,7 @@ public final class ClientProtos {
        * <code>optional bool load_column_families_on_demand = 14;</code>
        *
        * <pre>
-       * DO NOT add defaults to load_column_families_on_demand.
+       * DO NOT add defaults to load_column_families_on_demand. 
        * </pre>
*/ public Builder clearLoadColumnFamiliesOnDemand() { diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java new file mode 100644 index 00000000000..3d2285c3a13 --- /dev/null +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java @@ -0,0 +1,13571 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: RSGroupAdmin.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class RSGroupAdminProtos { + private RSGroupAdminProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface ListTablesOfRSGroupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string r_s_group_name = 1; + /** + * required string r_s_group_name = 1; + */ + boolean hasRSGroupName(); + /** + * required string r_s_group_name = 1; + */ + java.lang.String getRSGroupName(); + /** + * required string r_s_group_name = 1; + */ + com.google.protobuf.ByteString + getRSGroupNameBytes(); + } + /** + * Protobuf type {@code hbase.pb.ListTablesOfRSGroupRequest} + */ + public static final class ListTablesOfRSGroupRequest extends + com.google.protobuf.GeneratedMessage + implements ListTablesOfRSGroupRequestOrBuilder { + // Use ListTablesOfRSGroupRequest.newBuilder() to construct. + private ListTablesOfRSGroupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListTablesOfRSGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListTablesOfRSGroupRequest defaultInstance; + public static ListTablesOfRSGroupRequest getDefaultInstance() { + return defaultInstance; + } + + public ListTablesOfRSGroupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListTablesOfRSGroupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + rSGroupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListTablesOfRSGroupRequest_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListTablesOfRSGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListTablesOfRSGroupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListTablesOfRSGroupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string r_s_group_name = 1; + public static final int R_S_GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object rSGroupName_; + /** + * required string r_s_group_name = 1; + */ + public boolean hasRSGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string r_s_group_name = 1; + */ + public java.lang.String getRSGroupName() { + java.lang.Object ref = rSGroupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + rSGroupName_ = s; + } + return s; + } + } + /** + * required string r_s_group_name = 1; + */ + public com.google.protobuf.ByteString + getRSGroupNameBytes() { + java.lang.Object ref = rSGroupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rSGroupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + rSGroupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRSGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getRSGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getRSGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest) obj; + + boolean result = true; + result = result && (hasRSGroupName() == other.hasRSGroupName()); + if (hasRSGroupName()) { + result = result && getRSGroupName() + .equals(other.getRSGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRSGroupName()) { + hash = (37 * hash) + R_S_GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getRSGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ListTablesOfRSGroupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListTablesOfRSGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListTablesOfRSGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + rSGroupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListTablesOfRSGroupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest buildPartial() { + 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.rSGroupName_ = rSGroupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest.getDefaultInstance()) return this; + if (other.hasRSGroupName()) { + bitField0_ |= 0x00000001; + rSGroupName_ = other.rSGroupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRSGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string r_s_group_name = 1; + private java.lang.Object rSGroupName_ = ""; + /** + * required string r_s_group_name = 1; + */ + public boolean hasRSGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string r_s_group_name = 1; + */ + public java.lang.String getRSGroupName() { + java.lang.Object ref = rSGroupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + rSGroupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string r_s_group_name = 1; + */ + public com.google.protobuf.ByteString + getRSGroupNameBytes() { + java.lang.Object ref = rSGroupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rSGroupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string r_s_group_name = 1; + */ + public Builder setRSGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + rSGroupName_ = value; + onChanged(); + return this; + } + /** + * required string r_s_group_name = 1; + */ + public Builder clearRSGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + rSGroupName_ = getDefaultInstance().getRSGroupName(); + onChanged(); + return this; + } + /** + * required 
string r_s_group_name = 1; + */ + public Builder setRSGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + rSGroupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ListTablesOfRSGroupRequest) + } + + static { + defaultInstance = new ListTablesOfRSGroupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ListTablesOfRSGroupRequest) + } + + public interface ListTablesOfRSGroupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .hbase.pb.TableName table_name = 1; + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + java.util.List + getTableNameList(); + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(int index); + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + int getTableNameCount(); + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + java.util.List + getTableNameOrBuilderList(); + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.ListTablesOfRSGroupResponse} + */ + public static final class ListTablesOfRSGroupResponse extends + com.google.protobuf.GeneratedMessage + implements ListTablesOfRSGroupResponseOrBuilder { + // Use ListTablesOfRSGroupResponse.newBuilder() to construct. + private ListTablesOfRSGroupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListTablesOfRSGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListTablesOfRSGroupResponse defaultInstance; + public static ListTablesOfRSGroupResponse getDefaultInstance() { + return defaultInstance; + } + + public ListTablesOfRSGroupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListTablesOfRSGroupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + tableName_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + tableName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + 
e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListTablesOfRSGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListTablesOfRSGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListTablesOfRSGroupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListTablesOfRSGroupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .hbase.pb.TableName table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private java.util.List tableName_; + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public java.util.List getTableNameList() { + return tableName_; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public java.util.List + getTableNameOrBuilderList() { + return tableName_; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public int getTableNameCount() { + return tableName_.size(); + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(int index) { + return tableName_.get(index); + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + return tableName_.get(index); + } + + private void initFields() { + tableName_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < tableName_.size(); i++) { + output.writeMessage(1, tableName_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < tableName_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + 
memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse) obj; + + boolean result = true; + result = result && getTableNameList() + .equals(other.getTableNameList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getTableNameCount() > 0) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableNameList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, 
extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ListTablesOfRSGroupResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListTablesOfRSGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListTablesOfRSGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + tableNameBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListTablesOfRSGroupResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse.getDefaultInstance(); + } + + public 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse(this); + int from_bitField0_ = bitField0_; + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse.getDefaultInstance()) return this; + if (tableNameBuilder_ == null) { + if (!other.tableName_.isEmpty()) { + if (tableName_.isEmpty()) { + tableName_ = other.tableName_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureTableNameIsMutable(); + tableName_.addAll(other.tableName_); + } + onChanged(); + } + } else { + if (!other.tableName_.isEmpty()) { + if (tableNameBuilder_.isEmpty()) { + tableNameBuilder_.dispose(); + tableNameBuilder_ = null; + tableName_ = other.tableName_; + bitField0_ = (bitField0_ & ~0x00000001); + tableNameBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTableNameFieldBuilder() : null; + } else { + tableNameBuilder_.addAllMessages(other.tableName_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListTablesOfRSGroupResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .hbase.pb.TableName table_name = 1; + private java.util.List tableName_ = + java.util.Collections.emptyList(); + private void ensureTableNameIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + tableName_ = new java.util.ArrayList(tableName_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_; + + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public java.util.List getTableNameList() { + if (tableNameBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableName_); + } else { + return tableNameBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public int getTableNameCount() { + if (tableNameBuilder_ == null) { + return tableName_.size(); + } else { + return tableNameBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); + } else { + return tableNameBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.set(index, value); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.set(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder addTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new 
NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(value); + onChanged(); + } else { + tableNameBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(index, value); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder addTableName( + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder addAllTableName( + java.lang.Iterable values) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + super.addAll(values, tableName_); + onChanged(); + } else { + tableNameBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public Builder removeTableName(int index) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.remove(index); + onChanged(); + } else { + tableNameBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder( + int index) { + return getTableNameFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); } else { + return tableNameBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public java.util.List + getTableNameOrBuilderList() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableName_); + } + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder addTableNameBuilder() { + return getTableNameFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder addTableNameBuilder( + int index) { + return getTableNameFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_name = 1; + */ + public java.util.List + getTableNameBuilderList() { + return getTableNameFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( + tableName_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ListTablesOfRSGroupResponse) + } + + static { + defaultInstance = new ListTablesOfRSGroupResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ListTablesOfRSGroupResponse) + } + + public interface GetRSGroupInfoRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string r_s_group_name = 1; + /** + * required string r_s_group_name = 1; + */ + boolean hasRSGroupName(); + /** + * required string r_s_group_name = 1; + */ + java.lang.String getRSGroupName(); + /** + * required string r_s_group_name = 1; + */ + com.google.protobuf.ByteString + getRSGroupNameBytes(); + } + /** + * Protobuf type {@code hbase.pb.GetRSGroupInfoRequest} + */ + public static final class GetRSGroupInfoRequest extends + com.google.protobuf.GeneratedMessage + implements GetRSGroupInfoRequestOrBuilder { + // Use GetRSGroupInfoRequest.newBuilder() to construct. 
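+    // Hand-written usage sketch (illustrative only, not protoc output): the
+    // required r_s_group_name field must be set before build() succeeds.
+    //
+    //   GetRSGroupInfoRequest request = GetRSGroupInfoRequest.newBuilder()
+    //       .setRSGroupName("my_group")   // omitting this makes build() throw
+    //                                     // UninitializedMessageException
+    //       .build();
+    //   GetRSGroupInfoRequest copy =
+    //       GetRSGroupInfoRequest.parseFrom(request.toByteArray());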
+    private GetRSGroupInfoRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private GetRSGroupInfoRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final GetRSGroupInfoRequest defaultInstance;
+    public static GetRSGroupInfoRequest getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public GetRSGroupInfoRequest getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetRSGroupInfoRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              rSGroupName_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoRequest_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<GetRSGroupInfoRequest> PARSER =
+        new com.google.protobuf.AbstractParser<GetRSGroupInfoRequest>() {
+      public GetRSGroupInfoRequest parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new GetRSGroupInfoRequest(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<GetRSGroupInfoRequest> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string r_s_group_name = 1;
+    public static final int R_S_GROUP_NAME_FIELD_NUMBER = 1;
+    private java.lang.Object rSGroupName_;
+    /**
+     * <code>required string r_s_group_name = 1;</code>
+     */
+    public boolean hasRSGroupName() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string r_s_group_name = 1;</code>
+     */
+    public java.lang.String getRSGroupName() {
+      java.lang.Object ref = rSGroupName_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs
= + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + rSGroupName_ = s; + } + return s; + } + } + /** + * required string r_s_group_name = 1; + */ + public com.google.protobuf.ByteString + getRSGroupNameBytes() { + java.lang.Object ref = rSGroupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rSGroupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + rSGroupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRSGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getRSGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getRSGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest) obj; + + boolean result = true; + result = result && (hasRSGroupName() == other.hasRSGroupName()); + if (hasRSGroupName()) { + result = result && getRSGroupName() + .equals(other.getRSGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRSGroupName()) { + hash = (37 * hash) + R_S_GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getRSGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GetRSGroupInfoRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest.class, 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + rSGroupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.rSGroupName_ = rSGroupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest.getDefaultInstance()) return this; + if (other.hasRSGroupName()) { + bitField0_ |= 0x00000001; + rSGroupName_ = other.rSGroupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRSGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = 
(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string r_s_group_name = 1; + private java.lang.Object rSGroupName_ = ""; + /** + * required string r_s_group_name = 1; + */ + public boolean hasRSGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string r_s_group_name = 1; + */ + public java.lang.String getRSGroupName() { + java.lang.Object ref = rSGroupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + rSGroupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string r_s_group_name = 1; + */ + public com.google.protobuf.ByteString + getRSGroupNameBytes() { + java.lang.Object ref = rSGroupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rSGroupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string r_s_group_name = 1; + */ + public Builder setRSGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + rSGroupName_ = value; + onChanged(); + return this; + } + /** + * required string r_s_group_name = 1; + */ + public Builder clearRSGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + rSGroupName_ = getDefaultInstance().getRSGroupName(); + onChanged(); + return this; + } + /** + * required string r_s_group_name = 1; + */ + public Builder setRSGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + rSGroupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetRSGroupInfoRequest) + } + + static { + defaultInstance = new GetRSGroupInfoRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GetRSGroupInfoRequest) + } + + public interface GetRSGroupInfoResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + boolean hasRSGroupInfo(); + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo getRSGroupInfo(); + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder getRSGroupInfoOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.GetRSGroupInfoResponse} + */ + public static final class GetRSGroupInfoResponse extends + com.google.protobuf.GeneratedMessage + implements GetRSGroupInfoResponseOrBuilder { + // Use GetRSGroupInfoResponse.newBuilder() to construct. 
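+    // Hand-written usage sketch (illustrative only, not protoc output):
+    // r_s_group_info is optional, so callers should test hasRSGroupInfo()
+    // before reading it; `wireBytes` is a placeholder for a serialized
+    // response.
+    //
+    //   GetRSGroupInfoResponse response =
+    //       GetRSGroupInfoResponse.parseFrom(wireBytes);
+    //   if (response.hasRSGroupInfo()) {
+    //     RSGroupProtos.RSGroupInfo info = response.getRSGroupInfo();
+    //   }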
+ private GetRSGroupInfoResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetRSGroupInfoResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetRSGroupInfoResponse defaultInstance; + public static GetRSGroupInfoResponse getDefaultInstance() { + return defaultInstance; + } + + public GetRSGroupInfoResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetRSGroupInfoResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = rSGroupInfo_.toBuilder(); + } + rSGroupInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(rSGroupInfo_); + rSGroupInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetRSGroupInfoResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetRSGroupInfoResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + public static final int R_S_GROUP_INFO_FIELD_NUMBER = 1; + private 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo rSGroupInfo_; + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public boolean hasRSGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo getRSGroupInfo() { + return rSGroupInfo_; + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder getRSGroupInfoOrBuilder() { + return rSGroupInfo_; + } + + private void initFields() { + rSGroupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasRSGroupInfo()) { + if (!getRSGroupInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, rSGroupInfo_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, rSGroupInfo_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse) obj; + + boolean result = true; + result = result && (hasRSGroupInfo() == other.hasRSGroupInfo()); + if (hasRSGroupInfo()) { + result = result && getRSGroupInfo() + .equals(other.getRSGroupInfo()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRSGroupInfo()) { + hash = (37 * hash) + R_S_GROUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getRSGroupInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse 
parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GetRSGroupInfoResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRSGroupInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (rSGroupInfoBuilder_ == null) { + rSGroupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance(); + } else { + rSGroupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (rSGroupInfoBuilder_ == null) { + result.rSGroupInfo_ = rSGroupInfo_; + } else { + result.rSGroupInfo_ = rSGroupInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse.getDefaultInstance()) return this; + if (other.hasRSGroupInfo()) { + mergeRSGroupInfo(other.getRSGroupInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + 
if (hasRSGroupInfo()) { + if (!getRSGroupInfo().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo rSGroupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder> rSGroupInfoBuilder_; + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public boolean hasRSGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo getRSGroupInfo() { + if (rSGroupInfoBuilder_ == null) { + return rSGroupInfo_; + } else { + return rSGroupInfoBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder setRSGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo value) { + if (rSGroupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rSGroupInfo_ = value; + onChanged(); + } else { + rSGroupInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder setRSGroupInfo( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder builderForValue) { + if (rSGroupInfoBuilder_ == null) { + rSGroupInfo_ = builderForValue.build(); + onChanged(); + } else { + rSGroupInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder mergeRSGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo value) { + if (rSGroupInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + rSGroupInfo_ != org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance()) { + rSGroupInfo_ = + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.newBuilder(rSGroupInfo_).mergeFrom(value).buildPartial(); + } else { + rSGroupInfo_ = value; + } + onChanged(); + } else { + rSGroupInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder clearRSGroupInfo() { + if (rSGroupInfoBuilder_ == null) { + rSGroupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance(); + 
onChanged(); + } else { + rSGroupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder getRSGroupInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRSGroupInfoFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder getRSGroupInfoOrBuilder() { + if (rSGroupInfoBuilder_ != null) { + return rSGroupInfoBuilder_.getMessageOrBuilder(); + } else { + return rSGroupInfo_; + } + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder> + getRSGroupInfoFieldBuilder() { + if (rSGroupInfoBuilder_ == null) { + rSGroupInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder>( + rSGroupInfo_, + getParentForChildren(), + isClean()); + rSGroupInfo_ = null; + } + return rSGroupInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetRSGroupInfoResponse) + } + + static { + defaultInstance = new GetRSGroupInfoResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GetRSGroupInfoResponse) + } + + public interface GetRSGroupInfoOfTableRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.TableName table_name = 1; + /** + * required .hbase.pb.TableName table_name = 1; + */ + boolean hasTableName(); + /** + * required .hbase.pb.TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(); + /** + * required .hbase.pb.TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.GetRSGroupInfoOfTableRequest} + */ + public static final class GetRSGroupInfoOfTableRequest extends + com.google.protobuf.GeneratedMessage + implements GetRSGroupInfoOfTableRequestOrBuilder { + // Use GetRSGroupInfoOfTableRequest.newBuilder() to construct. 
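+    // Hand-written usage sketch (illustrative only, not protoc output):
+    // table_name is a required nested message, so isInitialized() recurses
+    // into it. The namespace/qualifier bytes fields are assumed from
+    // Table.proto.
+    //
+    //   GetRSGroupInfoOfTableRequest request =
+    //       GetRSGroupInfoOfTableRequest.newBuilder()
+    //           .setTableName(TableProtos.TableName.newBuilder()
+    //               .setNamespace(com.google.protobuf.ByteString.copyFromUtf8("default"))
+    //               .setQualifier(com.google.protobuf.ByteString.copyFromUtf8("t1"))
+    //               .build())
+    //           .build();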
+ private GetRSGroupInfoOfTableRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetRSGroupInfoOfTableRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetRSGroupInfoOfTableRequest defaultInstance; + public static GetRSGroupInfoOfTableRequest getDefaultInstance() { + return defaultInstance; + } + + public GetRSGroupInfoOfTableRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetRSGroupInfoOfTableRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfTableRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfTableRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetRSGroupInfoOfTableRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetRSGroupInfoOfTableRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.TableName table_name = 1; + public static 
final int TABLE_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_; + /** + * required .hbase.pb.TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { + return tableName_; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + private void initFields() { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableName_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GetRSGroupInfoOfTableRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfTableRequest_descriptor; + } + + 
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfTableRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfTableRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest.getDefaultInstance()) return this; + if 
(other.hasTableName()) { + mergeTableName(other.getTableName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTableName()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .hbase.pb.TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .hbase.pb.TableName table_name = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetRSGroupInfoOfTableRequest) + } + + static { + defaultInstance = new GetRSGroupInfoOfTableRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GetRSGroupInfoOfTableRequest) + } + + public interface GetRSGroupInfoOfTableResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + boolean hasRSGroupInfo(); + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo getRSGroupInfo(); + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder getRSGroupInfoOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.GetRSGroupInfoOfTableResponse} + */ + public static final class GetRSGroupInfoOfTableResponse extends + com.google.protobuf.GeneratedMessage + implements GetRSGroupInfoOfTableResponseOrBuilder { + // Use GetRSGroupInfoOfTableResponse.newBuilder() to construct. 
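+ // The r_s_group_info field is optional, so callers should check
+ // hasRSGroupInfo() before reading it; a table that resolves to no group may
+ // come back with the field unset. Illustrative handling, assuming resp was
+ // obtained from an RSGroupAdminService call:
+ //
+ //   if (resp.hasRSGroupInfo()) {
+ //     String group = resp.getRSGroupInfo().getName();
+ //   }
+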
+ private GetRSGroupInfoOfTableResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetRSGroupInfoOfTableResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetRSGroupInfoOfTableResponse defaultInstance; + public static GetRSGroupInfoOfTableResponse getDefaultInstance() { + return defaultInstance; + } + + public GetRSGroupInfoOfTableResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetRSGroupInfoOfTableResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = rSGroupInfo_.toBuilder(); + } + rSGroupInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(rSGroupInfo_); + rSGroupInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfTableResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfTableResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetRSGroupInfoOfTableResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetRSGroupInfoOfTableResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional .hbase.pb.RSGroupInfo 
r_s_group_info = 1; + public static final int R_S_GROUP_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo rSGroupInfo_; + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public boolean hasRSGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo getRSGroupInfo() { + return rSGroupInfo_; + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder getRSGroupInfoOrBuilder() { + return rSGroupInfo_; + } + + private void initFields() { + rSGroupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasRSGroupInfo()) { + if (!getRSGroupInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, rSGroupInfo_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, rSGroupInfo_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse) obj; + + boolean result = true; + result = result && (hasRSGroupInfo() == other.hasRSGroupInfo()); + if (hasRSGroupInfo()) { + result = result && getRSGroupInfo() + .equals(other.getRSGroupInfo()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRSGroupInfo()) { + hash = (37 * hash) + R_S_GROUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getRSGroupInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GetRSGroupInfoOfTableResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfTableResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfTableResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRSGroupInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (rSGroupInfoBuilder_ == null) { + rSGroupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance(); + } else { + rSGroupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfTableResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (rSGroupInfoBuilder_ == null) { + result.rSGroupInfo_ = rSGroupInfo_; + } else { + result.rSGroupInfo_ = rSGroupInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse.getDefaultInstance()) return this; + if (other.hasRSGroupInfo()) { + mergeRSGroupInfo(other.getRSGroupInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasRSGroupInfo()) { + if (!getRSGroupInfo().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo rSGroupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder> rSGroupInfoBuilder_; + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public boolean hasRSGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo getRSGroupInfo() { + if (rSGroupInfoBuilder_ == null) { + return rSGroupInfo_; + } else { + return rSGroupInfoBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder setRSGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo value) { + if (rSGroupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rSGroupInfo_ = value; + onChanged(); + } else { + rSGroupInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder setRSGroupInfo( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder builderForValue) { + if (rSGroupInfoBuilder_ == null) { + rSGroupInfo_ = builderForValue.build(); + onChanged(); + } else { + rSGroupInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder mergeRSGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo value) { + if (rSGroupInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + rSGroupInfo_ != org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance()) { + rSGroupInfo_ = + 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.newBuilder(rSGroupInfo_).mergeFrom(value).buildPartial(); + } else { + rSGroupInfo_ = value; + } + onChanged(); + } else { + rSGroupInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder clearRSGroupInfo() { + if (rSGroupInfoBuilder_ == null) { + rSGroupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance(); + onChanged(); + } else { + rSGroupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder getRSGroupInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRSGroupInfoFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder getRSGroupInfoOrBuilder() { + if (rSGroupInfoBuilder_ != null) { + return rSGroupInfoBuilder_.getMessageOrBuilder(); + } else { + return rSGroupInfo_; + } + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder> + getRSGroupInfoFieldBuilder() { + if (rSGroupInfoBuilder_ == null) { + rSGroupInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder>( + rSGroupInfo_, + getParentForChildren(), + isClean()); + rSGroupInfo_ = null; + } + return rSGroupInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetRSGroupInfoOfTableResponse) + } + + static { + defaultInstance = new GetRSGroupInfoOfTableResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GetRSGroupInfoOfTableResponse) + } + + public interface MoveServersRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string target_group = 1; + /** + * required string target_group = 1; + */ + boolean hasTargetGroup(); + /** + * required string target_group = 1; + */ + java.lang.String getTargetGroup(); + /** + * required string target_group = 1; + */ + com.google.protobuf.ByteString + getTargetGroupBytes(); + + // repeated .hbase.pb.ServerName servers = 3; + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + java.util.List + getServersList(); + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index); + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + int getServersCount(); + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + java.util.List + getServersOrBuilderList(); + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.MoveServersRequest} + */ + public static final class MoveServersRequest extends + 
com.google.protobuf.GeneratedMessage + implements MoveServersRequestOrBuilder { + // Use MoveServersRequest.newBuilder() to construct. + private MoveServersRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveServersRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveServersRequest defaultInstance; + public static MoveServersRequest getDefaultInstance() { + return defaultInstance; + } + + public MoveServersRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveServersRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + targetGroup_ = input.readBytes(); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + servers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = java.util.Collections.unmodifiableList(servers_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveServersRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveServersRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string 
target_group = 1; + public static final int TARGET_GROUP_FIELD_NUMBER = 1; + private java.lang.Object targetGroup_; + /** + * required string target_group = 1; + */ + public boolean hasTargetGroup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetGroup_ = s; + } + return s; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .hbase.pb.ServerName servers = 3; + public static final int SERVERS_FIELD_NUMBER = 3; + private java.util.List servers_; + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public java.util.List getServersList() { + return servers_; + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public java.util.List + getServersOrBuilderList() { + return servers_; + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public int getServersCount() { + return servers_.size(); + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index) { + return servers_.get(index); + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder( + int index) { + return servers_.get(index); + } + + private void initFields() { + targetGroup_ = ""; + servers_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTargetGroup()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getServersCount(); i++) { + if (!getServers(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getTargetGroupBytes()); + } + for (int i = 0; i < servers_.size(); i++) { + output.writeMessage(3, servers_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getTargetGroupBytes()); + } + for (int i = 0; i < servers_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, servers_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected 
java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest) obj; + + boolean result = true; + result = result && (hasTargetGroup() == other.hasTargetGroup()); + if (hasTargetGroup()) { + result = result && getTargetGroup() + .equals(other.getTargetGroup()); + } + result = result && getServersList() + .equals(other.getServersList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTargetGroup()) { + hash = (37 * hash) + TARGET_GROUP_FIELD_NUMBER; + hash = (53 * hash) + getTargetGroup().hashCode(); + } + if (getServersCount() > 0) { + hash = (37 * hash) + SERVERS_FIELD_NUMBER; + hash = (53 * hash) + getServersList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + 
return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.MoveServersRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServersFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + targetGroup_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (serversBuilder_ == null) { + servers_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + serversBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest build() 
{ + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.targetGroup_ = targetGroup_; + if (serversBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = java.util.Collections.unmodifiableList(servers_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.servers_ = servers_; + } else { + result.servers_ = serversBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest.getDefaultInstance()) return this; + if (other.hasTargetGroup()) { + bitField0_ |= 0x00000001; + targetGroup_ = other.targetGroup_; + onChanged(); + } + if (serversBuilder_ == null) { + if (!other.servers_.isEmpty()) { + if (servers_.isEmpty()) { + servers_ = other.servers_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureServersIsMutable(); + servers_.addAll(other.servers_); + } + onChanged(); + } + } else { + if (!other.servers_.isEmpty()) { + if (serversBuilder_.isEmpty()) { + serversBuilder_.dispose(); + serversBuilder_ = null; + servers_ = other.servers_; + bitField0_ = (bitField0_ & ~0x00000002); + serversBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getServersFieldBuilder() : null; + } else { + serversBuilder_.addAllMessages(other.servers_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTargetGroup()) { + + return false; + } + for (int i = 0; i < getServersCount(); i++) { + if (!getServers(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string target_group = 1; + private java.lang.Object targetGroup_ = ""; + /** + * required string target_group = 1; + */ + public boolean hasTargetGroup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetGroup_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_group = 1; + */ + public Builder setTargetGroup( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public Builder clearTargetGroup() { + bitField0_ = (bitField0_ & ~0x00000001); + targetGroup_ = getDefaultInstance().getTargetGroup(); + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public Builder setTargetGroupBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + + // repeated .hbase.pb.ServerName servers = 3; + private java.util.List servers_ = + java.util.Collections.emptyList(); + private void ensureServersIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new java.util.ArrayList(servers_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serversBuilder_; + + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public java.util.List getServersList() { + if (serversBuilder_ == null) { + return 
java.util.Collections.unmodifiableList(servers_); + } else { + return serversBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public int getServersCount() { + if (serversBuilder_ == null) { + return servers_.size(); + } else { + return serversBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index) { + if (serversBuilder_ == null) { + return servers_.get(index); + } else { + return serversBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public Builder setServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.set(index, value); + onChanged(); + } else { + serversBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public Builder setServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.set(index, builderForValue.build()); + onChanged(); + } else { + serversBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public Builder addServers(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(value); + onChanged(); + } else { + serversBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public Builder addServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(index, value); + onChanged(); + } else { + serversBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public Builder addServers( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.add(builderForValue.build()); + onChanged(); + } else { + serversBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public Builder addServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.add(index, builderForValue.build()); + onChanged(); + } else { + serversBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public Builder addAllServers( + java.lang.Iterable values) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + super.addAll(values, servers_); + onChanged(); + } else { + serversBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public Builder clearServers() { + if (serversBuilder_ == null) { + servers_ = java.util.Collections.emptyList(); + bitField0_ = 
(bitField0_ & ~0x00000002); + onChanged(); + } else { + serversBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public Builder removeServers(int index) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.remove(index); + onChanged(); + } else { + serversBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServersBuilder( + int index) { + return getServersFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder( + int index) { + if (serversBuilder_ == null) { + return servers_.get(index); } else { + return serversBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public java.util.List + getServersOrBuilderList() { + if (serversBuilder_ != null) { + return serversBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(servers_); + } + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServersBuilder() { + return getServersFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServersBuilder( + int index) { + return getServersFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerName servers = 3; + */ + public java.util.List + getServersBuilderList() { + return getServersFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServersFieldBuilder() { + if (serversBuilder_ == null) { + serversBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + servers_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + servers_ = null; + } + return serversBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.MoveServersRequest) + } + + static { + defaultInstance = new MoveServersRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.MoveServersRequest) + } + + public interface MoveServersResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.MoveServersResponse} + */ + public static final class MoveServersResponse extends + com.google.protobuf.GeneratedMessage + implements MoveServersResponseOrBuilder { + // Use MoveServersResponse.newBuilder() to construct. 
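+ // MoveServersResponse is an empty acknowledgement; the payload of interest is
+ // MoveServersRequest above, which names a target group plus the servers to
+ // move into it. Illustrative construction, with placeholder host name and
+ // port:
+ //
+ //   HBaseProtos.ServerName server = HBaseProtos.ServerName.newBuilder()
+ //       .setHostName("rs1.example.com")
+ //       .setPort(16020)
+ //       .build();
+ //   MoveServersRequest req = MoveServersRequest.newBuilder()
+ //       .setTargetGroup("group_a")
+ //       .addServers(server)
+ //       .build();
+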
+ private MoveServersResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveServersResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveServersResponse defaultInstance; + public static MoveServersResponse getDefaultInstance() { + return defaultInstance; + } + + public MoveServersResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveServersResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveServersResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveServersResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private 
static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.MoveServersResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.MoveServersResponse)
+    }
+
+    static {
+      defaultInstance = new MoveServersResponse(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.MoveServersResponse)
+  }
+
+  public interface MoveTablesRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string target_group = 1;
+    /**
+     * <code>required string target_group = 1;</code>
+     */
+    boolean hasTargetGroup();
+    /**
+     * <code>required string target_group = 1;</code>
+     */
+    java.lang.String getTargetGroup();
+    /**
+     * <code>required string target_group = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getTargetGroupBytes();
+
+    // repeated .hbase.pb.TableName table_name = 2;
+    /**
+     * <code>repeated .hbase.pb.TableName table_name = 2;</code>
+     */
+    java.util.List<org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName>
+        getTableNameList();
+    /**
+     * <code>repeated .hbase.pb.TableName table_name = 2;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(int index);
+    /**
+     * <code>repeated .hbase.pb.TableName table_name = 2;</code>
+     */
+    int getTableNameCount();
+    /**
+     * <code>repeated .hbase.pb.TableName table_name = 2;</code>
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>
+        getTableNameOrBuilderList();
+    /**
+     * <code>repeated .hbase.pb.TableName table_name = 2;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder(
+        int index);
+  }
+  /**
+   * Protobuf type {@code hbase.pb.MoveTablesRequest}
+   */
+  public static final class MoveTablesRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements MoveTablesRequestOrBuilder {
+    // Use MoveTablesRequest.newBuilder() to construct.
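
As an illustrative aside (not part of the generated file): the MoveTablesRequestOrBuilder contract above amounts to one required string (target_group) plus a repeated hbase.pb.TableName message, and a client drives it through the generated builder. A minimal sketch, assuming the setTargetGroup/addTableName/build methods this class defines further down, and assuming TableProtos.TableName carries bytes namespace/qualifier fields as in HBase's Table.proto; the class name and literals are invented for illustration:

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
    import org.apache.hadoop.hbase.protobuf.generated.TableProtos;

    public class MoveTablesRequestSketch {
      public static void main(String[] args) {
        // target_group is required: build() would throw an
        // UninitializedMessageException if it were left unset.
        MoveTablesRequest request = MoveTablesRequest.newBuilder()
            .setTargetGroup("batch_group")
            // table_name is repeated; add one entry per table to move.
            .addTableName(TableProtos.TableName.newBuilder()
                .setNamespace(ByteString.copyFromUtf8("default"))
                .setQualifier(ByteString.copyFromUtf8("t1")))
            .build();
        System.out.println(request.getTableNameCount());  // 1
      }
    }
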
+ private MoveTablesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveTablesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveTablesRequest defaultInstance; + public static MoveTablesRequest getDefaultInstance() { + return defaultInstance; + } + + public MoveTablesRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveTablesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + targetGroup_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tableName_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + tableName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveTablesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveTablesRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string target_group = 1; + public static final int TARGET_GROUP_FIELD_NUMBER = 1; + private java.lang.Object targetGroup_; + /** + * required 
string target_group = 1; + */ + public boolean hasTargetGroup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetGroup_ = s; + } + return s; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .hbase.pb.TableName table_name = 2; + public static final int TABLE_NAME_FIELD_NUMBER = 2; + private java.util.List tableName_; + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public java.util.List getTableNameList() { + return tableName_; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public java.util.List + getTableNameOrBuilderList() { + return tableName_; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public int getTableNameCount() { + return tableName_.size(); + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(int index) { + return tableName_.get(index); + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + return tableName_.get(index); + } + + private void initFields() { + targetGroup_ = ""; + tableName_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTargetGroup()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getTargetGroupBytes()); + } + for (int i = 0; i < tableName_.size(); i++) { + output.writeMessage(2, tableName_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getTargetGroupBytes()); + } + for (int i = 0; i < tableName_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, tableName_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return 
super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest) obj; + + boolean result = true; + result = result && (hasTargetGroup() == other.hasTargetGroup()); + if (hasTargetGroup()) { + result = result && getTargetGroup() + .equals(other.getTargetGroup()); + } + result = result && getTableNameList() + .equals(other.getTableNameList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTargetGroup()) { + hash = (37 * hash) + TARGET_GROUP_FIELD_NUMBER; + hash = (53 * hash) + getTargetGroup().hashCode(); + } + if (getTableNameCount() > 0) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableNameList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.MoveTablesRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + targetGroup_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + tableNameBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveTablesRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest 
result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.targetGroup_ = targetGroup_; + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest.getDefaultInstance()) return this; + if (other.hasTargetGroup()) { + bitField0_ |= 0x00000001; + targetGroup_ = other.targetGroup_; + onChanged(); + } + if (tableNameBuilder_ == null) { + if (!other.tableName_.isEmpty()) { + if (tableName_.isEmpty()) { + tableName_ = other.tableName_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureTableNameIsMutable(); + tableName_.addAll(other.tableName_); + } + onChanged(); + } + } else { + if (!other.tableName_.isEmpty()) { + if (tableNameBuilder_.isEmpty()) { + tableNameBuilder_.dispose(); + tableNameBuilder_ = null; + tableName_ = other.tableName_; + bitField0_ = (bitField0_ & ~0x00000002); + tableNameBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTableNameFieldBuilder() : null; + } else { + tableNameBuilder_.addAllMessages(other.tableName_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTargetGroup()) { + + return false; + } + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string target_group = 1; + private java.lang.Object targetGroup_ = ""; + /** + * required string target_group = 1; + */ + public boolean hasTargetGroup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetGroup_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_group = 1; + */ + public Builder setTargetGroup( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public Builder clearTargetGroup() { + bitField0_ = (bitField0_ & ~0x00000001); + targetGroup_ = getDefaultInstance().getTargetGroup(); + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public Builder setTargetGroupBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + + // repeated .hbase.pb.TableName table_name = 2; + private java.util.List tableName_ = + java.util.Collections.emptyList(); + private void ensureTableNameIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + tableName_ = new java.util.ArrayList(tableName_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_; + + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public java.util.List getTableNameList() { + if (tableNameBuilder_ == 
null) { + return java.util.Collections.unmodifiableList(tableName_); + } else { + return tableNameBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public int getTableNameCount() { + if (tableNameBuilder_ == null) { + return tableName_.size(); + } else { + return tableNameBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); + } else { + return tableNameBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder setTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.set(index, value); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder setTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.set(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder addTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(value); + onChanged(); + } else { + tableNameBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(index, value); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder addTableName( + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder addAllTableName( + java.lang.Iterable values) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + super.addAll(values, tableName_); + onChanged(); + } else { + tableNameBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder 
clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public Builder removeTableName(int index) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.remove(index); + onChanged(); + } else { + tableNameBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder( + int index) { + return getTableNameFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); } else { + return tableNameBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public java.util.List + getTableNameOrBuilderList() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableName_); + } + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder addTableNameBuilder() { + return getTableNameFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder addTableNameBuilder( + int index) { + return getTableNameFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_name = 2; + */ + public java.util.List + getTableNameBuilderList() { + return getTableNameFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( + tableName_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.MoveTablesRequest) + } + + static { + defaultInstance = new MoveTablesRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.MoveTablesRequest) + } + + public interface MoveTablesResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.MoveTablesResponse} + */ + public static final class MoveTablesResponse extends + com.google.protobuf.GeneratedMessage + implements MoveTablesResponseOrBuilder { + // Use MoveTablesResponse.newBuilder() to construct. 
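
With MoveTablesRequest now complete, a short round-trip sketch (illustrative only, not part of the patch) shows how the static parseFrom overloads generated above invert serialization. toByteArray() is assumed from the protobuf 2.x runtime (AbstractMessageLite), which this diff does not show:

    import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
    import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse;

    public class MoveTablesWireSketch {
      public static void main(String[] args) throws Exception {
        MoveTablesRequest request = MoveTablesRequest.newBuilder()
            .setTargetGroup("batch_group")
            .build();

        // Serialize with the inherited runtime method, then parse back
        // with the generated static overload.
        byte[] wire = request.toByteArray();
        MoveTablesRequest decoded = MoveTablesRequest.parseFrom(wire);
        System.out.println(decoded.getTargetGroup());  // batch_group

        // MoveTablesResponse declares no fields, so an empty byte array
        // decodes to a message equal to the default instance.
        MoveTablesResponse response = MoveTablesResponse.parseFrom(new byte[0]);
        System.out.println(response.equals(MoveTablesResponse.getDefaultInstance()));  // true
      }
    }
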
+ private MoveTablesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveTablesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveTablesResponse defaultInstance; + public static MoveTablesResponse getDefaultInstance() { + return defaultInstance; + } + + public MoveTablesResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveTablesResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveTablesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveTablesResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final 
long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.MoveTablesResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveTablesResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.MoveTablesResponse) + } + + static { + defaultInstance = new MoveTablesResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.MoveTablesResponse) + } + + public interface AddRSGroupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string r_s_group_name = 1; + /** + * required string r_s_group_name = 1; + */ + boolean hasRSGroupName(); + /** + * required string r_s_group_name = 1; + */ + java.lang.String getRSGroupName(); + /** + * required string r_s_group_name = 1; + */ + com.google.protobuf.ByteString + getRSGroupNameBytes(); + } + /** + * Protobuf type {@code hbase.pb.AddRSGroupRequest} + */ + public static final class AddRSGroupRequest extends + com.google.protobuf.GeneratedMessage + implements AddRSGroupRequestOrBuilder { + // Use AddRSGroupRequest.newBuilder() to construct. 
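
AddRSGroupRequest carries a single required string, r_s_group_name. A minimal sketch (illustrative only) of how the generated code enforces the required field; only the has/get accessors appear above, so the setRSGroupName setter is assumed from the generated naming pattern for this field, defined later in this message's Builder:

    import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest;

    public class AddRSGroupRequestSketch {
      public static void main(String[] args) {
        // An empty builder reports uninitialized because r_s_group_name is
        // required; calling build() on it would throw an
        // UninitializedMessageException.
        AddRSGroupRequest.Builder builder = AddRSGroupRequest.newBuilder();
        System.out.println(builder.isInitialized());  // false

        // Setting the required field makes the message buildable.
        AddRSGroupRequest request = builder
            .setRSGroupName("batch_group")
            .build();
        System.out.println(request.getRSGroupName());  // batch_group
      }
    }
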
+ private AddRSGroupRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AddRSGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AddRSGroupRequest defaultInstance; + public static AddRSGroupRequest getDefaultInstance() { + return defaultInstance; + } + + public AddRSGroupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AddRSGroupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + rSGroupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_AddRSGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_AddRSGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AddRSGroupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AddRSGroupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string r_s_group_name = 1; + public static final int R_S_GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object rSGroupName_; + /** + * required string r_s_group_name = 1; + */ + public boolean hasRSGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string r_s_group_name = 1; + */ + public java.lang.String getRSGroupName() { + java.lang.Object ref = rSGroupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + 
java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + rSGroupName_ = s; + } + return s; + } + } + /** + * required string r_s_group_name = 1; + */ + public com.google.protobuf.ByteString + getRSGroupNameBytes() { + java.lang.Object ref = rSGroupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rSGroupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + rSGroupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRSGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getRSGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getRSGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest) obj; + + boolean result = true; + result = result && (hasRSGroupName() == other.hasRSGroupName()); + if (hasRSGroupName()) { + result = result && getRSGroupName() + .equals(other.getRSGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRSGroupName()) { + hash = (37 * hash) + R_S_GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getRSGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.AddRSGroupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder<Builder> + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_AddRSGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_AddRSGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest.Builder.class); + } + + // Construct
using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + rSGroupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_AddRSGroupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.rSGroupName_ = rSGroupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest.getDefaultInstance()) return this; + if (other.hasRSGroupName()) { + bitField0_ |= 0x00000001; + rSGroupName_ = other.rSGroupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRSGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; 
+ } + private int bitField0_; + + // required string r_s_group_name = 1; + private java.lang.Object rSGroupName_ = ""; + /** + * required string r_s_group_name = 1; + */ + public boolean hasRSGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string r_s_group_name = 1; + */ + public java.lang.String getRSGroupName() { + java.lang.Object ref = rSGroupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + rSGroupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string r_s_group_name = 1; + */ + public com.google.protobuf.ByteString + getRSGroupNameBytes() { + java.lang.Object ref = rSGroupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rSGroupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string r_s_group_name = 1; + */ + public Builder setRSGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + rSGroupName_ = value; + onChanged(); + return this; + } + /** + * required string r_s_group_name = 1; + */ + public Builder clearRSGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + rSGroupName_ = getDefaultInstance().getRSGroupName(); + onChanged(); + return this; + } + /** + * required string r_s_group_name = 1; + */ + public Builder setRSGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + rSGroupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.AddRSGroupRequest) + } + + static { + defaultInstance = new AddRSGroupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.AddRSGroupRequest) + } + + public interface AddRSGroupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.AddRSGroupResponse} + */ + public static final class AddRSGroupResponse extends + com.google.protobuf.GeneratedMessage + implements AddRSGroupResponseOrBuilder { + // Use AddRSGroupResponse.newBuilder() to construct. 
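+ // Editor's sketch, not protoc output: a minimal round trip of the AddRSGroupRequest message defined
+ // above ("foo_group" is a hypothetical group name used only for illustration):
+ //   AddRSGroupRequest req = AddRSGroupRequest.newBuilder().setRSGroupName("foo_group").build();
+ //   AddRSGroupRequest copy = AddRSGroupRequest.parseFrom(req.toByteArray()); // copy.getRSGroupName() == "foo_group"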
+ private AddRSGroupResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private AddRSGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AddRSGroupResponse defaultInstance; + public static AddRSGroupResponse getDefaultInstance() { + return defaultInstance; + } + + public AddRSGroupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private AddRSGroupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_AddRSGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_AddRSGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse.Builder.class); + } + + public static com.google.protobuf.Parser<AddRSGroupResponse> PARSER = + new com.google.protobuf.AbstractParser<AddRSGroupResponse>() { + public AddRSGroupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AddRSGroupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser<AddRSGroupResponse> getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final
long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.AddRSGroupResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder<Builder> + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_AddRSGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_AddRSGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_AddRSGroupResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.AddRSGroupResponse) + } + + static { + defaultInstance = new AddRSGroupResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.AddRSGroupResponse) + } + + public interface RemoveRSGroupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string r_s_group_name = 1; + /** + * required string r_s_group_name = 1; + */ + boolean hasRSGroupName(); + /** + * required string r_s_group_name = 1; + */ + java.lang.String getRSGroupName(); + /** + * required string r_s_group_name = 1; + */ + com.google.protobuf.ByteString + getRSGroupNameBytes(); + } + /** + * Protobuf type {@code hbase.pb.RemoveRSGroupRequest} + */ + public static final class RemoveRSGroupRequest extends + com.google.protobuf.GeneratedMessage + implements RemoveRSGroupRequestOrBuilder { + // Use RemoveRSGroupRequest.newBuilder() to construct. 
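+ // Editor's sketch, not protoc output: like AddRSGroupRequest, this message declares its name field
+ // required, so an unset name fails at build time ("foo_group" is a hypothetical name):
+ //   RemoveRSGroupRequest.Builder b = RemoveRSGroupRequest.newBuilder();
+ //   b.isInitialized(); // false until setRSGroupName(...) is called; b.build() would throw UninitializedMessageException
+ //   RemoveRSGroupRequest req = b.setRSGroupName("foo_group").build();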
+ private RemoveRSGroupRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RemoveRSGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RemoveRSGroupRequest defaultInstance; + public static RemoveRSGroupRequest getDefaultInstance() { + return defaultInstance; + } + + public RemoveRSGroupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RemoveRSGroupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + rSGroupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveRSGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveRSGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest.Builder.class); + } + + public static com.google.protobuf.Parser<RemoveRSGroupRequest> PARSER = + new com.google.protobuf.AbstractParser<RemoveRSGroupRequest>() { + public RemoveRSGroupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RemoveRSGroupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser<RemoveRSGroupRequest> getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string r_s_group_name = 1; + public static final int R_S_GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object rSGroupName_; + /** + * required string r_s_group_name = 1; + */ + public boolean hasRSGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string r_s_group_name = 1; + */ + public java.lang.String getRSGroupName() { + java.lang.Object ref = rSGroupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + rSGroupName_ = s; + } + return s; + } + } + /** + * required string r_s_group_name = 1; + */ + public com.google.protobuf.ByteString + getRSGroupNameBytes() { + java.lang.Object ref = rSGroupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rSGroupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + rSGroupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRSGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getRSGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getRSGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest) obj; + + boolean result = true; + result = result && (hasRSGroupName() == other.hasRSGroupName()); + if (hasRSGroupName()) { + result = result && getRSGroupName() + .equals(other.getRSGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRSGroupName()) { + hash = (37 * hash) + R_S_GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getRSGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.RemoveRSGroupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder<Builder> + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveRSGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveRSGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest.class,
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + rSGroupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveRSGroupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.rSGroupName_ = rSGroupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest.getDefaultInstance()) return this; + if (other.hasRSGroupName()) { + bitField0_ |= 0x00000001; + rSGroupName_ = other.rSGroupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRSGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = 
(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string r_s_group_name = 1; + private java.lang.Object rSGroupName_ = ""; + /** + * required string r_s_group_name = 1; + */ + public boolean hasRSGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string r_s_group_name = 1; + */ + public java.lang.String getRSGroupName() { + java.lang.Object ref = rSGroupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + rSGroupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string r_s_group_name = 1; + */ + public com.google.protobuf.ByteString + getRSGroupNameBytes() { + java.lang.Object ref = rSGroupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rSGroupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string r_s_group_name = 1; + */ + public Builder setRSGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + rSGroupName_ = value; + onChanged(); + return this; + } + /** + * required string r_s_group_name = 1; + */ + public Builder clearRSGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + rSGroupName_ = getDefaultInstance().getRSGroupName(); + onChanged(); + return this; + } + /** + * required string r_s_group_name = 1; + */ + public Builder setRSGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + rSGroupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.RemoveRSGroupRequest) + } + + static { + defaultInstance = new RemoveRSGroupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RemoveRSGroupRequest) + } + + public interface RemoveRSGroupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.RemoveRSGroupResponse} + */ + public static final class RemoveRSGroupResponse extends + com.google.protobuf.GeneratedMessage + implements RemoveRSGroupResponseOrBuilder { + // Use RemoveRSGroupResponse.newBuilder() to construct. 
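+ // Editor's note, not protoc output: the *Response messages of this service carry no fields; an empty
+ // message is simply the RPC acknowledgement. Unknown fields are still parsed and retained (see
+ // getUnknownFields() below), which is what lets fields be added to a response later without breaking
+ // older clients.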
+ private RemoveRSGroupResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RemoveRSGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RemoveRSGroupResponse defaultInstance; + public static RemoveRSGroupResponse getDefaultInstance() { + return defaultInstance; + } + + public RemoveRSGroupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RemoveRSGroupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveRSGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveRSGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse.Builder.class); + } + + public static com.google.protobuf.Parser<RemoveRSGroupResponse> PARSER = + new com.google.protobuf.AbstractParser<RemoveRSGroupResponse>() { + public RemoveRSGroupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RemoveRSGroupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser<RemoveRSGroupResponse> getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; +
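+ // Editor's note, not protoc output: -1 is the "not yet computed" sentinel; the size is computed once
+ // and cached here, so repeated writeTo()/getSerializedSize() calls do not re-walk the fields.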
return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.RemoveRSGroupResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder<Builder> + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveRSGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveRSGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveRSGroupResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse result = new
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.RemoveRSGroupResponse) + } + + static { + defaultInstance = new RemoveRSGroupResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RemoveRSGroupResponse) + } + + public interface BalanceRSGroupRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string r_s_group_name = 1; + /** + * required string r_s_group_name = 1; + */ + boolean hasRSGroupName(); + /** + * required string r_s_group_name = 1; + */ + java.lang.String getRSGroupName(); + /** + * required string r_s_group_name = 1; + */ + com.google.protobuf.ByteString + getRSGroupNameBytes(); + } + /** + * Protobuf type {@code hbase.pb.BalanceRSGroupRequest} + */ + public static final class BalanceRSGroupRequest extends + com.google.protobuf.GeneratedMessage + implements BalanceRSGroupRequestOrBuilder { + // Use BalanceRSGroupRequest.newBuilder() to construct. 
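+ // Editor's sketch, not protoc output: on the wire this request is just field 1 as a length-delimited
+ // string, i.e. the tag byte 0x0A (field number 1, wire type 2), a length varint, then the UTF-8 name:
+ //   BalanceRSGroupRequest req = BalanceRSGroupRequest.newBuilder().setRSGroupName("foo_group").build(); // hypothetical name
+ //   req.getSerializedSize(); // 2 + name length for names under 128 bytes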
+ private BalanceRSGroupRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BalanceRSGroupRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BalanceRSGroupRequest defaultInstance; + public static BalanceRSGroupRequest getDefaultInstance() { + return defaultInstance; + } + + public BalanceRSGroupRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BalanceRSGroupRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + rSGroupName_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_BalanceRSGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_BalanceRSGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest.Builder.class); + } + + public static com.google.protobuf.Parser<BalanceRSGroupRequest> PARSER = + new com.google.protobuf.AbstractParser<BalanceRSGroupRequest>() { + public BalanceRSGroupRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BalanceRSGroupRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser<BalanceRSGroupRequest> getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string r_s_group_name = 1; + public static final int R_S_GROUP_NAME_FIELD_NUMBER = 1; + private java.lang.Object rSGroupName_; + /** + * required string r_s_group_name = 1; + */ + public boolean hasRSGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string r_s_group_name = 1; + */ + public java.lang.String getRSGroupName() { + java.lang.Object ref = rSGroupName_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs
= + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + rSGroupName_ = s; + } + return s; + } + } + /** + * required string r_s_group_name = 1; + */ + public com.google.protobuf.ByteString + getRSGroupNameBytes() { + java.lang.Object ref = rSGroupName_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rSGroupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + rSGroupName_ = ""; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasRSGroupName()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getRSGroupNameBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getRSGroupNameBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest) obj; + + boolean result = true; + result = result && (hasRSGroupName() == other.hasRSGroupName()); + if (hasRSGroupName()) { + result = result && getRSGroupName() + .equals(other.getRSGroupName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRSGroupName()) { + hash = (37 * hash) + R_S_GROUP_NAME_FIELD_NUMBER; + hash = (53 * hash) + getRSGroupName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BalanceRSGroupRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_BalanceRSGroupRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_BalanceRSGroupRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest.class, 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + rSGroupName_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_BalanceRSGroupRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.rSGroupName_ = rSGroupName_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest.getDefaultInstance()) return this; + if (other.hasRSGroupName()) { + bitField0_ |= 0x00000001; + rSGroupName_ = other.rSGroupName_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasRSGroupName()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = 
(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string r_s_group_name = 1; + private java.lang.Object rSGroupName_ = ""; + /** + * required string r_s_group_name = 1; + */ + public boolean hasRSGroupName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string r_s_group_name = 1; + */ + public java.lang.String getRSGroupName() { + java.lang.Object ref = rSGroupName_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + rSGroupName_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string r_s_group_name = 1; + */ + public com.google.protobuf.ByteString + getRSGroupNameBytes() { + java.lang.Object ref = rSGroupName_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + rSGroupName_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string r_s_group_name = 1; + */ + public Builder setRSGroupName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + rSGroupName_ = value; + onChanged(); + return this; + } + /** + * required string r_s_group_name = 1; + */ + public Builder clearRSGroupName() { + bitField0_ = (bitField0_ & ~0x00000001); + rSGroupName_ = getDefaultInstance().getRSGroupName(); + onChanged(); + return this; + } + /** + * required string r_s_group_name = 1; + */ + public Builder setRSGroupNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + rSGroupName_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BalanceRSGroupRequest) + } + + static { + defaultInstance = new BalanceRSGroupRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BalanceRSGroupRequest) + } + + public interface BalanceRSGroupResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool balanceRan = 1; + /** + * required bool balanceRan = 1; + */ + boolean hasBalanceRan(); + /** + * required bool balanceRan = 1; + */ + boolean getBalanceRan(); + } + /** + * Protobuf type {@code hbase.pb.BalanceRSGroupResponse} + */ + public static final class BalanceRSGroupResponse extends + com.google.protobuf.GeneratedMessage + implements BalanceRSGroupResponseOrBuilder { + // Use BalanceRSGroupResponse.newBuilder() to construct. 
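+    // A minimal round-trip sketch for the two balance messages (illustrative only,
+    // not emitted by protoc; "demo_group" and the local variable names are
+    // hypothetical examples):
+    //
+    //   BalanceRSGroupRequest req = BalanceRSGroupRequest.newBuilder()
+    //       .setRSGroupName("demo_group")   // required field; build() throws if unset
+    //       .build();
+    //   BalanceRSGroupRequest copy = BalanceRSGroupRequest.parseFrom(req.toByteArray());
+    //   assert copy.getRSGroupName().equals("demo_group");
+    //
+    // The response defined here is read the same way; its required getBalanceRan()
+    // flag reports whether a balance run was actually carried out.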
+ private BalanceRSGroupResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private BalanceRSGroupResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final BalanceRSGroupResponse defaultInstance; + public static BalanceRSGroupResponse getDefaultInstance() { + return defaultInstance; + } + + public BalanceRSGroupResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private BalanceRSGroupResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + balanceRan_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_BalanceRSGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_BalanceRSGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public BalanceRSGroupResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BalanceRSGroupResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bool balanceRan = 1; + public static final int BALANCERAN_FIELD_NUMBER = 1; + private boolean balanceRan_; + /** + * required bool balanceRan = 1; + */ + public boolean hasBalanceRan() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool balanceRan = 1; + */ + public boolean getBalanceRan() { + return balanceRan_; + } + + private void initFields() { + balanceRan_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = 
memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasBalanceRan()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, balanceRan_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, balanceRan_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse) obj; + + boolean result = true; + result = result && (hasBalanceRan() == other.hasBalanceRan()); + if (hasBalanceRan()) { + result = result && (getBalanceRan() + == other.getBalanceRan()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasBalanceRan()) { + hash = (37 * hash) + BALANCERAN_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getBalanceRan()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return 
PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.BalanceRSGroupResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_BalanceRSGroupResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_BalanceRSGroupResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + balanceRan_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + 
return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_BalanceRSGroupResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.balanceRan_ = balanceRan_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse.getDefaultInstance()) return this; + if (other.hasBalanceRan()) { + setBalanceRan(other.getBalanceRan()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasBalanceRan()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bool balanceRan = 1; + private boolean balanceRan_ ; + /** + * required bool balanceRan = 1; + */ + public boolean hasBalanceRan() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool balanceRan = 1; + */ + public boolean getBalanceRan() { + return balanceRan_; + } + /** + * required bool balanceRan = 1; + */ + public Builder setBalanceRan(boolean value) { + bitField0_ |= 0x00000001; + balanceRan_ = value; + onChanged(); + return this; + } + /** + * required bool balanceRan = 1; + */ + public Builder 
clearBalanceRan() { + bitField0_ = (bitField0_ & ~0x00000001); + balanceRan_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.BalanceRSGroupResponse) + } + + static { + defaultInstance = new BalanceRSGroupResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.BalanceRSGroupResponse) + } + + public interface ListRSGroupInfosRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.ListRSGroupInfosRequest} + */ + public static final class ListRSGroupInfosRequest extends + com.google.protobuf.GeneratedMessage + implements ListRSGroupInfosRequestOrBuilder { + // Use ListRSGroupInfosRequest.newBuilder() to construct. + private ListRSGroupInfosRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListRSGroupInfosRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListRSGroupInfosRequest defaultInstance; + public static ListRSGroupInfosRequest getDefaultInstance() { + return defaultInstance; + } + + public ListRSGroupInfosRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListRSGroupInfosRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListRSGroupInfosRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListRSGroupInfosRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListRSGroupInfosRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListRSGroupInfosRequest(input, extensionRegistry); + } 
+ }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ListRSGroupInfosRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListRSGroupInfosRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListRSGroupInfosRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListRSGroupInfosRequest_descriptor; + } + + public 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ListRSGroupInfosRequest) + } + + static { + defaultInstance = new ListRSGroupInfosRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ListRSGroupInfosRequest) + } + + public interface ListRSGroupInfosResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + java.util.List + getRSGroupInfoList(); + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo getRSGroupInfo(int index); + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + int getRSGroupInfoCount(); + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + java.util.List + getRSGroupInfoOrBuilderList(); + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder getRSGroupInfoOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.ListRSGroupInfosResponse} + */ + public static final class ListRSGroupInfosResponse extends 
+ com.google.protobuf.GeneratedMessage + implements ListRSGroupInfosResponseOrBuilder { + // Use ListRSGroupInfosResponse.newBuilder() to construct. + private ListRSGroupInfosResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ListRSGroupInfosResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ListRSGroupInfosResponse defaultInstance; + public static ListRSGroupInfosResponse getDefaultInstance() { + return defaultInstance; + } + + public ListRSGroupInfosResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ListRSGroupInfosResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + rSGroupInfo_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + rSGroupInfo_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + rSGroupInfo_ = java.util.Collections.unmodifiableList(rSGroupInfo_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListRSGroupInfosResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListRSGroupInfosResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ListRSGroupInfosResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ListRSGroupInfosResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated 
.hbase.pb.RSGroupInfo r_s_group_info = 1; + public static final int R_S_GROUP_INFO_FIELD_NUMBER = 1; + private java.util.List rSGroupInfo_; + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public java.util.List getRSGroupInfoList() { + return rSGroupInfo_; + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public java.util.List + getRSGroupInfoOrBuilderList() { + return rSGroupInfo_; + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public int getRSGroupInfoCount() { + return rSGroupInfo_.size(); + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo getRSGroupInfo(int index) { + return rSGroupInfo_.get(index); + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder getRSGroupInfoOrBuilder( + int index) { + return rSGroupInfo_.get(index); + } + + private void initFields() { + rSGroupInfo_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getRSGroupInfoCount(); i++) { + if (!getRSGroupInfo(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < rSGroupInfo_.size(); i++) { + output.writeMessage(1, rSGroupInfo_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < rSGroupInfo_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, rSGroupInfo_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse) obj; + + boolean result = true; + result = result && getRSGroupInfoList() + .equals(other.getRSGroupInfoList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getRSGroupInfoCount() > 0) { + hash = (37 * hash) + R_S_GROUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getRSGroupInfoList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.ListRSGroupInfosResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponseOrBuilder { + public static final 
com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListRSGroupInfosResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListRSGroupInfosResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRSGroupInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (rSGroupInfoBuilder_ == null) { + rSGroupInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + rSGroupInfoBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_ListRSGroupInfosResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse(this); + int from_bitField0_ = bitField0_; + if (rSGroupInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + rSGroupInfo_ = java.util.Collections.unmodifiableList(rSGroupInfo_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.rSGroupInfo_ = rSGroupInfo_; + } else { + result.rSGroupInfo_ = rSGroupInfoBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.getDefaultInstance()) return this; + if (rSGroupInfoBuilder_ == null) { + if (!other.rSGroupInfo_.isEmpty()) { + if (rSGroupInfo_.isEmpty()) { + rSGroupInfo_ = other.rSGroupInfo_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureRSGroupInfoIsMutable(); + rSGroupInfo_.addAll(other.rSGroupInfo_); + } + onChanged(); + } + } else { + if (!other.rSGroupInfo_.isEmpty()) { + if (rSGroupInfoBuilder_.isEmpty()) { + rSGroupInfoBuilder_.dispose(); + rSGroupInfoBuilder_ = null; + rSGroupInfo_ = other.rSGroupInfo_; + bitField0_ = (bitField0_ & ~0x00000001); + rSGroupInfoBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getRSGroupInfoFieldBuilder() : null; + } else { + rSGroupInfoBuilder_.addAllMessages(other.rSGroupInfo_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getRSGroupInfoCount(); i++) { + if (!getRSGroupInfo(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + private java.util.List rSGroupInfo_ = + java.util.Collections.emptyList(); + private void ensureRSGroupInfoIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + rSGroupInfo_ = new java.util.ArrayList(rSGroupInfo_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder> rSGroupInfoBuilder_; + + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public java.util.List getRSGroupInfoList() { + if (rSGroupInfoBuilder_ == null) { + return java.util.Collections.unmodifiableList(rSGroupInfo_); + } else { + return rSGroupInfoBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public int getRSGroupInfoCount() { + if (rSGroupInfoBuilder_ == null) { + return rSGroupInfo_.size(); + } else { + return rSGroupInfoBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo getRSGroupInfo(int index) { + if (rSGroupInfoBuilder_ == null) { + return rSGroupInfo_.get(index); + } else { + return rSGroupInfoBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder setRSGroupInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo value) { + if (rSGroupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + 
ensureRSGroupInfoIsMutable(); + rSGroupInfo_.set(index, value); + onChanged(); + } else { + rSGroupInfoBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder setRSGroupInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder builderForValue) { + if (rSGroupInfoBuilder_ == null) { + ensureRSGroupInfoIsMutable(); + rSGroupInfo_.set(index, builderForValue.build()); + onChanged(); + } else { + rSGroupInfoBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder addRSGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo value) { + if (rSGroupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRSGroupInfoIsMutable(); + rSGroupInfo_.add(value); + onChanged(); + } else { + rSGroupInfoBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder addRSGroupInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo value) { + if (rSGroupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRSGroupInfoIsMutable(); + rSGroupInfo_.add(index, value); + onChanged(); + } else { + rSGroupInfoBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder addRSGroupInfo( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder builderForValue) { + if (rSGroupInfoBuilder_ == null) { + ensureRSGroupInfoIsMutable(); + rSGroupInfo_.add(builderForValue.build()); + onChanged(); + } else { + rSGroupInfoBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder addRSGroupInfo( + int index, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder builderForValue) { + if (rSGroupInfoBuilder_ == null) { + ensureRSGroupInfoIsMutable(); + rSGroupInfo_.add(index, builderForValue.build()); + onChanged(); + } else { + rSGroupInfoBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder addAllRSGroupInfo( + java.lang.Iterable values) { + if (rSGroupInfoBuilder_ == null) { + ensureRSGroupInfoIsMutable(); + super.addAll(values, rSGroupInfo_); + onChanged(); + } else { + rSGroupInfoBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder clearRSGroupInfo() { + if (rSGroupInfoBuilder_ == null) { + rSGroupInfo_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + rSGroupInfoBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder removeRSGroupInfo(int index) { + if (rSGroupInfoBuilder_ == null) { + ensureRSGroupInfoIsMutable(); + rSGroupInfo_.remove(index); + onChanged(); + } else { + rSGroupInfoBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder getRSGroupInfoBuilder( + int index) { + return getRSGroupInfoFieldBuilder().getBuilder(index); + } + /** + 
* repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder getRSGroupInfoOrBuilder( + int index) { + if (rSGroupInfoBuilder_ == null) { + return rSGroupInfo_.get(index); } else { + return rSGroupInfoBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public java.util.List + getRSGroupInfoOrBuilderList() { + if (rSGroupInfoBuilder_ != null) { + return rSGroupInfoBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(rSGroupInfo_); + } + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder addRSGroupInfoBuilder() { + return getRSGroupInfoFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder addRSGroupInfoBuilder( + int index) { + return getRSGroupInfoFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance()); + } + /** + * repeated .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public java.util.List + getRSGroupInfoBuilderList() { + return getRSGroupInfoFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder> + getRSGroupInfoFieldBuilder() { + if (rSGroupInfoBuilder_ == null) { + rSGroupInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder>( + rSGroupInfo_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + rSGroupInfo_ = null; + } + return rSGroupInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.ListRSGroupInfosResponse) + } + + static { + defaultInstance = new ListRSGroupInfosResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.ListRSGroupInfosResponse) + } + + public interface GetRSGroupInfoOfServerRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .hbase.pb.ServerName server = 2; + /** + * required .hbase.pb.ServerName server = 2; + */ + boolean hasServer(); + /** + * required .hbase.pb.ServerName server = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer(); + /** + * required .hbase.pb.ServerName server = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.GetRSGroupInfoOfServerRequest} + */ + public static final class GetRSGroupInfoOfServerRequest extends + com.google.protobuf.GeneratedMessage + implements GetRSGroupInfoOfServerRequestOrBuilder { + // Use GetRSGroupInfoOfServerRequest.newBuilder() to construct. 
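Usage note: a minimal sketch, not part of the generated file, of how a client might build this request with the generated API. The host name, port, start code, and variable names are illustrative assumptions; field names follow the hbase.pb.ServerName proto.

    // Build the ServerName first (host_name is required in the proto).
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server =
        org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder()
            .setHostName("rs1.example.com") // hypothetical region server host
            .setPort(16020)
            .setStartCode(1L)
            .build();
    // server is a required field, so build() enforces it via isInitialized();
    // buildPartial() would skip that check.
    org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest request =
        org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest.newBuilder()
            .setServer(server)
            .build();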
+ private GetRSGroupInfoOfServerRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetRSGroupInfoOfServerRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetRSGroupInfoOfServerRequest defaultInstance; + public static GetRSGroupInfoOfServerRequest getDefaultInstance() { + return defaultInstance; + } + + public GetRSGroupInfoOfServerRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetRSGroupInfoOfServerRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = server_.toBuilder(); + } + server_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(server_); + server_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfServerRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfServerRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetRSGroupInfoOfServerRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetRSGroupInfoOfServerRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .hbase.pb.ServerName server = 2; + public static 
final int SERVER_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_; + /** + * required .hbase.pb.ServerName server = 2; + */ + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.ServerName server = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { + return server_; + } + /** + * required .hbase.pb.ServerName server = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { + return server_; + } + + private void initFields() { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasServer()) { + memoizedIsInitialized = 0; + return false; + } + if (!getServer().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(2, server_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, server_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest) obj; + + boolean result = true; + result = result && (hasServer() == other.hasServer()); + if (hasServer()) { + result = result && getServer() + .equals(other.getServer()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasServer()) { + hash = (37 * hash) + SERVER_FIELD_NUMBER; + hash = (53 * hash) + getServer().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest parseFrom( 
+ com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GetRSGroupInfoOfServerRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfServerRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfServerRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServerFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (serverBuilder_ == null) { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + } else { + serverBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfServerRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (serverBuilder_ == null) { + result.server_ = server_; + } else { + result.server_ = serverBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest.getDefaultInstance()) return this; + if (other.hasServer()) { + mergeServer(other.getServer()); + } + 
this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasServer()) { + + return false; + } + if (!getServer().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .hbase.pb.ServerName server = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverBuilder_; + /** + * required .hbase.pb.ServerName server = 2; + */ + public boolean hasServer() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .hbase.pb.ServerName server = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() { + if (serverBuilder_ == null) { + return server_; + } else { + return serverBuilder_.getMessage(); + } + } + /** + * required .hbase.pb.ServerName server = 2; + */ + public Builder setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + server_ = value; + onChanged(); + } else { + serverBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.ServerName server = 2; + */ + public Builder setServer( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serverBuilder_ == null) { + server_ = builderForValue.build(); + onChanged(); + } else { + serverBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.ServerName server = 2; + */ + public Builder mergeServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serverBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + server_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) { + server_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(server_).mergeFrom(value).buildPartial(); + } else { + server_ = value; + } + onChanged(); + } else { + serverBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .hbase.pb.ServerName server = 2; + */ + public Builder clearServer() { + if (serverBuilder_ == null) { + server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance(); + onChanged(); + } else { + serverBuilder_.clear(); + } + bitField0_ = (bitField0_ & 
~0x00000001); + return this; + } + /** + * required .hbase.pb.ServerName server = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getServerFieldBuilder().getBuilder(); + } + /** + * required .hbase.pb.ServerName server = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() { + if (serverBuilder_ != null) { + return serverBuilder_.getMessageOrBuilder(); + } else { + return server_; + } + } + /** + * required .hbase.pb.ServerName server = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServerFieldBuilder() { + if (serverBuilder_ == null) { + serverBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + server_, + getParentForChildren(), + isClean()); + server_ = null; + } + return serverBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetRSGroupInfoOfServerRequest) + } + + static { + defaultInstance = new GetRSGroupInfoOfServerRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GetRSGroupInfoOfServerRequest) + } + + public interface GetRSGroupInfoOfServerResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + boolean hasRSGroupInfo(); + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo getRSGroupInfo(); + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder getRSGroupInfoOrBuilder(); + } + /** + * Protobuf type {@code hbase.pb.GetRSGroupInfoOfServerResponse} + */ + public static final class GetRSGroupInfoOfServerResponse extends + com.google.protobuf.GeneratedMessage + implements GetRSGroupInfoOfServerResponseOrBuilder { + // Use GetRSGroupInfoOfServerResponse.newBuilder() to construct. 
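Usage note: a minimal sketch, not part of the generated file, of consuming this response; the response variable is an assumed handle on an already-parsed message. Because r_s_group_info is declared optional, hasRSGroupInfo() should be checked before reading it.

    if (response.hasRSGroupInfo()) {
        org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo info =
            response.getRSGroupInfo();
        // name is the group's identifier per the RSGroup.proto definition
        System.out.println(info.getName());
    } else {
        // the queried server is not assigned to any region server group
    }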
+ private GetRSGroupInfoOfServerResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetRSGroupInfoOfServerResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetRSGroupInfoOfServerResponse defaultInstance; + public static GetRSGroupInfoOfServerResponse getDefaultInstance() { + return defaultInstance; + } + + public GetRSGroupInfoOfServerResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetRSGroupInfoOfServerResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = rSGroupInfo_.toBuilder(); + } + rSGroupInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(rSGroupInfo_); + rSGroupInfo_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfServerResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfServerResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetRSGroupInfoOfServerResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetRSGroupInfoOfServerResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // optional 
.hbase.pb.RSGroupInfo r_s_group_info = 1; + public static final int R_S_GROUP_INFO_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo rSGroupInfo_; + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public boolean hasRSGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo getRSGroupInfo() { + return rSGroupInfo_; + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder getRSGroupInfoOrBuilder() { + return rSGroupInfo_; + } + + private void initFields() { + rSGroupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (hasRSGroupInfo()) { + if (!getRSGroupInfo().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, rSGroupInfo_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, rSGroupInfo_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse) obj; + + boolean result = true; + result = result && (hasRSGroupInfo() == other.hasRSGroupInfo()); + if (hasRSGroupInfo()) { + result = result && getRSGroupInfo() + .equals(other.getRSGroupInfo()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasRSGroupInfo()) { + hash = (37 * hash) + R_S_GROUP_INFO_FIELD_NUMBER; + hash = (53 * hash) + getRSGroupInfo().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.GetRSGroupInfoOfServerResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfServerResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfServerResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRSGroupInfoFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (rSGroupInfoBuilder_ == null) { + rSGroupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance(); + } else { + rSGroupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_GetRSGroupInfoOfServerResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (rSGroupInfoBuilder_ == null) { + result.rSGroupInfo_ = rSGroupInfo_; + } else { + result.rSGroupInfo_ = rSGroupInfoBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse.getDefaultInstance()) return this; + if (other.hasRSGroupInfo()) { + mergeRSGroupInfo(other.getRSGroupInfo()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (hasRSGroupInfo()) { + if (!getRSGroupInfo().isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + private org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo rSGroupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder> rSGroupInfoBuilder_; + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public boolean hasRSGroupInfo() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo getRSGroupInfo() { + if (rSGroupInfoBuilder_ == null) { + return rSGroupInfo_; + } else { + return rSGroupInfoBuilder_.getMessage(); + } + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder setRSGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo value) { + if (rSGroupInfoBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rSGroupInfo_ = value; + onChanged(); + } else { + rSGroupInfoBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder setRSGroupInfo( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder builderForValue) { + if (rSGroupInfoBuilder_ == null) { + rSGroupInfo_ = builderForValue.build(); + onChanged(); + } else { + rSGroupInfoBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder mergeRSGroupInfo(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo value) { + if (rSGroupInfoBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + rSGroupInfo_ != org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance()) { + rSGroupInfo_ = + 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.newBuilder(rSGroupInfo_).mergeFrom(value).buildPartial(); + } else { + rSGroupInfo_ = value; + } + onChanged(); + } else { + rSGroupInfoBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public Builder clearRSGroupInfo() { + if (rSGroupInfoBuilder_ == null) { + rSGroupInfo_ = org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance(); + onChanged(); + } else { + rSGroupInfoBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder getRSGroupInfoBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getRSGroupInfoFieldBuilder().getBuilder(); + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder getRSGroupInfoOrBuilder() { + if (rSGroupInfoBuilder_ != null) { + return rSGroupInfoBuilder_.getMessageOrBuilder(); + } else { + return rSGroupInfo_; + } + } + /** + * optional .hbase.pb.RSGroupInfo r_s_group_info = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder> + getRSGroupInfoFieldBuilder() { + if (rSGroupInfoBuilder_ == null) { + rSGroupInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder>( + rSGroupInfo_, + getParentForChildren(), + isClean()); + rSGroupInfo_ = null; + } + return rSGroupInfoBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.GetRSGroupInfoOfServerResponse) + } + + static { + defaultInstance = new GetRSGroupInfoOfServerResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.GetRSGroupInfoOfServerResponse) + } + + public interface MoveServersAndTablesRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string target_group = 1; + /** + * required string target_group = 1; + */ + boolean hasTargetGroup(); + /** + * required string target_group = 1; + */ + java.lang.String getTargetGroup(); + /** + * required string target_group = 1; + */ + com.google.protobuf.ByteString + getTargetGroupBytes(); + + // repeated .hbase.pb.ServerName servers = 2; + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + java.util.List + getServersList(); + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index); + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + int getServersCount(); + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + java.util.List + getServersOrBuilderList(); + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder( + int index); + + // repeated .hbase.pb.TableName table_name = 3; + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + 
java.util.List + getTableNameList(); + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(int index); + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + int getTableNameCount(); + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + java.util.List + getTableNameOrBuilderList(); + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.MoveServersAndTablesRequest} + */ + public static final class MoveServersAndTablesRequest extends + com.google.protobuf.GeneratedMessage + implements MoveServersAndTablesRequestOrBuilder { + // Use MoveServersAndTablesRequest.newBuilder() to construct. + private MoveServersAndTablesRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveServersAndTablesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveServersAndTablesRequest defaultInstance; + public static MoveServersAndTablesRequest getDefaultInstance() { + return defaultInstance; + } + + public MoveServersAndTablesRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveServersAndTablesRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + targetGroup_ = input.readBytes(); + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + servers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry)); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + tableName_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + tableName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = java.util.Collections.unmodifiableList(servers_); + } + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static 
final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveServersAndTablesRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveServersAndTablesRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string target_group = 1; + public static final int TARGET_GROUP_FIELD_NUMBER = 1; + private java.lang.Object targetGroup_; + /** + * required string target_group = 1; + */ + public boolean hasTargetGroup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + targetGroup_ = s; + } + return s; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .hbase.pb.ServerName servers = 2; + public static final int SERVERS_FIELD_NUMBER = 2; + private java.util.List servers_; + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public java.util.List getServersList() { + return servers_; + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public java.util.List + getServersOrBuilderList() { + return servers_; + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public int getServersCount() { + return servers_.size(); + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index) { + return servers_.get(index); + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder( + int index) { + return servers_.get(index); + } + + // repeated .hbase.pb.TableName table_name = 3; + public static final int TABLE_NAME_FIELD_NUMBER = 3; + private java.util.List tableName_; + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public java.util.List getTableNameList() { + return tableName_; + } + /** + * repeated 
.hbase.pb.TableName table_name = 3; + */ + public java.util.List + getTableNameOrBuilderList() { + return tableName_; + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public int getTableNameCount() { + return tableName_.size(); + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(int index) { + return tableName_.get(index); + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + return tableName_.get(index); + } + + private void initFields() { + targetGroup_ = ""; + servers_ = java.util.Collections.emptyList(); + tableName_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTargetGroup()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getServersCount(); i++) { + if (!getServers(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getTargetGroupBytes()); + } + for (int i = 0; i < servers_.size(); i++) { + output.writeMessage(2, servers_.get(i)); + } + for (int i = 0; i < tableName_.size(); i++) { + output.writeMessage(3, tableName_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getTargetGroupBytes()); + } + for (int i = 0; i < servers_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, servers_.get(i)); + } + for (int i = 0; i < tableName_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, tableName_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest) obj; + + boolean result = true; + result = result && (hasTargetGroup() == other.hasTargetGroup()); + if (hasTargetGroup()) { + result = result && getTargetGroup() + .equals(other.getTargetGroup()); + } + result = result && getServersList() + .equals(other.getServersList()); + result = result && getTableNameList() + 
.equals(other.getTableNameList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTargetGroup()) { + hash = (37 * hash) + TARGET_GROUP_FIELD_NUMBER; + hash = (53 * hash) + getTargetGroup().hashCode(); + } + if (getServersCount() > 0) { + hash = (37 * hash) + SERVERS_FIELD_NUMBER; + hash = (53 * hash) + getServersList().hashCode(); + } + if (getTableNameCount() > 0) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableNameList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return 
PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.MoveServersAndTablesRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServersFieldBuilder(); + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + targetGroup_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (serversBuilder_ == null) { + servers_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + serversBuilder_.clear(); + } + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + tableNameBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + 
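+      /*
+       * Illustrative usage sketch -- not part of the generated file. The request
+       * mirrors the proto definition above: required string target_group = 1,
+       * repeated .hbase.pb.ServerName servers = 2, repeated .hbase.pb.TableName
+       * table_name = 3. Assuming the standard protobuf 2.5 builders from
+       * HBaseProtos/TableProtos elsewhere in this patch, a caller would build it as:
+       *
+       *   MoveServersAndTablesRequest req = MoveServersAndTablesRequest.newBuilder()
+       *       .setTargetGroup("my_group")        // required; build() throws if unset
+       *       .addServers(HBaseProtos.ServerName.newBuilder()
+       *           .setHostName("rs1.example.com").setPort(16020).build())
+       *       .addTableName(TableProtos.TableName.newBuilder()
+       *           .setNamespace(ByteString.copyFromUtf8("default"))
+       *           .setQualifier(ByteString.copyFromUtf8("t1")).build())
+       *       .build();
+       *
+       * Group name, host, port, and table are hypothetical values for illustration.
+       */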
public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.targetGroup_ = targetGroup_; + if (serversBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = java.util.Collections.unmodifiableList(servers_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.servers_ = servers_; + } else { + result.servers_ = serversBuilder_.build(); + } + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + tableName_ = java.util.Collections.unmodifiableList(tableName_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.getDefaultInstance()) return this; + if (other.hasTargetGroup()) { + bitField0_ |= 0x00000001; + targetGroup_ = other.targetGroup_; + onChanged(); + } + if (serversBuilder_ == null) { + if (!other.servers_.isEmpty()) { + if (servers_.isEmpty()) { + servers_ = other.servers_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureServersIsMutable(); + servers_.addAll(other.servers_); + } + onChanged(); + } + } else { + if (!other.servers_.isEmpty()) { + if (serversBuilder_.isEmpty()) { + serversBuilder_.dispose(); + serversBuilder_ = null; + servers_ = other.servers_; + bitField0_ = (bitField0_ & ~0x00000002); + serversBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getServersFieldBuilder() : null; + } else { + serversBuilder_.addAllMessages(other.servers_); + } + } + } + if (tableNameBuilder_ == null) { + if (!other.tableName_.isEmpty()) { + if (tableName_.isEmpty()) { + tableName_ = other.tableName_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureTableNameIsMutable(); + tableName_.addAll(other.tableName_); + } + onChanged(); + } + } else { + if (!other.tableName_.isEmpty()) { + if (tableNameBuilder_.isEmpty()) { + tableNameBuilder_.dispose(); + tableNameBuilder_ = null; + tableName_ = other.tableName_; + bitField0_ = (bitField0_ & ~0x00000004); + tableNameBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getTableNameFieldBuilder() : null; + } else { + tableNameBuilder_.addAllMessages(other.tableName_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTargetGroup()) { + + return false; + } + for (int i = 0; i < getServersCount(); i++) { + if (!getServers(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getTableNameCount(); i++) { + if (!getTableName(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string target_group = 1; + private java.lang.Object targetGroup_ = ""; + /** + * required string target_group = 1; + */ + public boolean hasTargetGroup() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string target_group = 1; + */ + public java.lang.String getTargetGroup() { + java.lang.Object ref = targetGroup_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + targetGroup_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string target_group = 1; + */ + public com.google.protobuf.ByteString + getTargetGroupBytes() { + java.lang.Object ref = targetGroup_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + targetGroup_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string target_group = 1; + */ + public Builder setTargetGroup( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public Builder clearTargetGroup() { + bitField0_ = (bitField0_ & ~0x00000001); + targetGroup_ = getDefaultInstance().getTargetGroup(); + onChanged(); + return this; + } + /** + * required string target_group = 1; + */ + public Builder setTargetGroupBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + targetGroup_ = value; + onChanged(); + return this; + } + + // repeated .hbase.pb.ServerName servers = 2; + private java.util.List servers_ = + java.util.Collections.emptyList(); + private void ensureServersIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new java.util.ArrayList(servers_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serversBuilder_; + + /** + * 
repeated .hbase.pb.ServerName servers = 2; + */ + public java.util.List getServersList() { + if (serversBuilder_ == null) { + return java.util.Collections.unmodifiableList(servers_); + } else { + return serversBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public int getServersCount() { + if (serversBuilder_ == null) { + return servers_.size(); + } else { + return serversBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index) { + if (serversBuilder_ == null) { + return servers_.get(index); + } else { + return serversBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public Builder setServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.set(index, value); + onChanged(); + } else { + serversBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public Builder setServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.set(index, builderForValue.build()); + onChanged(); + } else { + serversBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public Builder addServers(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(value); + onChanged(); + } else { + serversBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public Builder addServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(index, value); + onChanged(); + } else { + serversBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public Builder addServers( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.add(builderForValue.build()); + onChanged(); + } else { + serversBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public Builder addServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.add(index, builderForValue.build()); + onChanged(); + } else { + serversBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public Builder addAllServers( + java.lang.Iterable values) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + super.addAll(values, servers_); + onChanged(); + } else { + serversBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + 
public Builder clearServers() { + if (serversBuilder_ == null) { + servers_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + serversBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public Builder removeServers(int index) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.remove(index); + onChanged(); + } else { + serversBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServersBuilder( + int index) { + return getServersFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder( + int index) { + if (serversBuilder_ == null) { + return servers_.get(index); } else { + return serversBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public java.util.List + getServersOrBuilderList() { + if (serversBuilder_ != null) { + return serversBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(servers_); + } + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServersBuilder() { + return getServersFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServersBuilder( + int index) { + return getServersFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerName servers = 2; + */ + public java.util.List + getServersBuilderList() { + return getServersFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServersFieldBuilder() { + if (serversBuilder_ == null) { + serversBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + servers_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + servers_ = null; + } + return serversBuilder_; + } + + // repeated .hbase.pb.TableName table_name = 3; + private java.util.List tableName_ = + java.util.Collections.emptyList(); + private void ensureTableNameIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + tableName_ = new java.util.ArrayList(tableName_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableNameBuilder_; + + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + 
public java.util.List getTableNameList() { + if (tableNameBuilder_ == null) { + return java.util.Collections.unmodifiableList(tableName_); + } else { + return tableNameBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public int getTableNameCount() { + if (tableNameBuilder_ == null) { + return tableName_.size(); + } else { + return tableNameBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTableName(int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); + } else { + return tableNameBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public Builder setTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.set(index, value); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public Builder setTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.set(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public Builder addTableName(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(value); + onChanged(); + } else { + tableNameBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTableNameIsMutable(); + tableName_.add(index, value); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public Builder addTableName( + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public Builder addTableName( + int index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.add(index, builderForValue.build()); + onChanged(); + } else { + tableNameBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public Builder addAllTableName( + java.lang.Iterable values) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + super.addAll(values, tableName_); + onChanged(); + } else { + tableNameBuilder_.addAllMessages(values); + } + return this; + } + /** 
+ * repeated .hbase.pb.TableName table_name = 3; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public Builder removeTableName(int index) { + if (tableNameBuilder_ == null) { + ensureTableNameIsMutable(); + tableName_.remove(index); + onChanged(); + } else { + tableNameBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableNameBuilder( + int index) { + return getTableNameFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableNameOrBuilder( + int index) { + if (tableNameBuilder_ == null) { + return tableName_.get(index); } else { + return tableNameBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public java.util.List + getTableNameOrBuilderList() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tableName_); + } + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder addTableNameBuilder() { + return getTableNameFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder addTableNameBuilder( + int index) { + return getTableNameFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName table_name = 3; + */ + public java.util.List + getTableNameBuilderList() { + return getTableNameFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( + tableName_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.MoveServersAndTablesRequest) + } + + static { + defaultInstance = new MoveServersAndTablesRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.MoveServersAndTablesRequest) + } + + public interface MoveServersAndTablesResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code hbase.pb.MoveServersAndTablesResponse} + */ + public static final class MoveServersAndTablesResponse extends + 
com.google.protobuf.GeneratedMessage + implements MoveServersAndTablesResponseOrBuilder { + // Use MoveServersAndTablesResponse.newBuilder() to construct. + private MoveServersAndTablesResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private MoveServersAndTablesResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MoveServersAndTablesResponse defaultInstance; + public static MoveServersAndTablesResponse getDefaultInstance() { + return defaultInstance; + } + + public MoveServersAndTablesResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private MoveServersAndTablesResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MoveServersAndTablesResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MoveServersAndTablesResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private 
int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); 
+ } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.MoveServersAndTablesResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_MoveServersAndTablesResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse result = buildPartial(); + if (!result.isInitialized()) { + throw 
newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.MoveServersAndTablesResponse) + } + + static { + defaultInstance = new MoveServersAndTablesResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.MoveServersAndTablesResponse) + } + + /** + * Protobuf service {@code hbase.pb.RSGroupAdminService} + */ + public static abstract class RSGroupAdminService + implements com.google.protobuf.Service { + protected RSGroupAdminService() {} + + public interface Interface { + /** + * rpc GetRSGroupInfo(.hbase.pb.GetRSGroupInfoRequest) returns (.hbase.pb.GetRSGroupInfoResponse); + */ + public abstract void getRSGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetRSGroupInfoOfTable(.hbase.pb.GetRSGroupInfoOfTableRequest) returns (.hbase.pb.GetRSGroupInfoOfTableResponse); + */ + public abstract void getRSGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetRSGroupInfoOfServer(.hbase.pb.GetRSGroupInfoOfServerRequest) returns (.hbase.pb.GetRSGroupInfoOfServerResponse); + */ + public abstract void getRSGroupInfoOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc MoveServers(.hbase.pb.MoveServersRequest) returns 
(.hbase.pb.MoveServersResponse); + */ + public abstract void moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc MoveTables(.hbase.pb.MoveTablesRequest) returns (.hbase.pb.MoveTablesResponse); + */ + public abstract void moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc AddRSGroup(.hbase.pb.AddRSGroupRequest) returns (.hbase.pb.AddRSGroupResponse); + */ + public abstract void addRSGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc RemoveRSGroup(.hbase.pb.RemoveRSGroupRequest) returns (.hbase.pb.RemoveRSGroupResponse); + */ + public abstract void removeRSGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc BalanceRSGroup(.hbase.pb.BalanceRSGroupRequest) returns (.hbase.pb.BalanceRSGroupResponse); + */ + public abstract void balanceRSGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc ListRSGroupInfos(.hbase.pb.ListRSGroupInfosRequest) returns (.hbase.pb.ListRSGroupInfosResponse); + */ + public abstract void listRSGroupInfos( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc MoveServersAndTables(.hbase.pb.MoveServersAndTablesRequest) returns (.hbase.pb.MoveServersAndTablesResponse); + */ + public abstract void moveServersAndTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest request, + com.google.protobuf.RpcCallback done); + + } + + public static com.google.protobuf.Service newReflectiveService( + final Interface impl) { + return new RSGroupAdminService() { + @java.lang.Override + public void getRSGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest request, + com.google.protobuf.RpcCallback done) { + impl.getRSGroupInfo(controller, request, done); + } + + @java.lang.Override + public void getRSGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest request, + com.google.protobuf.RpcCallback done) { + impl.getRSGroupInfoOfTable(controller, request, done); + } + + @java.lang.Override + public void getRSGroupInfoOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest request, + com.google.protobuf.RpcCallback done) { + impl.getRSGroupInfoOfServer(controller, request, done); + } + + @java.lang.Override + public void moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest request, + com.google.protobuf.RpcCallback 
done) { + impl.moveServers(controller, request, done); + } + + @java.lang.Override + public void moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest request, + com.google.protobuf.RpcCallback done) { + impl.moveTables(controller, request, done); + } + + @java.lang.Override + public void addRSGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest request, + com.google.protobuf.RpcCallback done) { + impl.addRSGroup(controller, request, done); + } + + @java.lang.Override + public void removeRSGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest request, + com.google.protobuf.RpcCallback done) { + impl.removeRSGroup(controller, request, done); + } + + @java.lang.Override + public void balanceRSGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest request, + com.google.protobuf.RpcCallback done) { + impl.balanceRSGroup(controller, request, done); + } + + @java.lang.Override + public void listRSGroupInfos( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest request, + com.google.protobuf.RpcCallback done) { + impl.listRSGroupInfos(controller, request, done); + } + + @java.lang.Override + public void moveServersAndTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest request, + com.google.protobuf.RpcCallback done) { + impl.moveServersAndTables(controller, request, done); + } + + }; + } + + public static com.google.protobuf.BlockingService + newReflectiveBlockingService(final BlockingInterface impl) { + return new com.google.protobuf.BlockingService() { + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final com.google.protobuf.Message callBlockingMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request) + throws com.google.protobuf.ServiceException { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callBlockingMethod() given method descriptor for " + + "wrong service type."); + } + switch(method.getIndex()) { + case 0: + return impl.getRSGroupInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest)request); + case 1: + return impl.getRSGroupInfoOfTable(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest)request); + case 2: + return impl.getRSGroupInfoOfServer(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest)request); + case 3: + return impl.moveServers(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest)request); + case 4: + return impl.moveTables(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest)request); + case 5: + return impl.addRSGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest)request); + case 6: + return impl.removeRSGroup(controller, 
(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest)request); + case 7: + return impl.balanceRSGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest)request); + case 8: + return impl.listRSGroupInfos(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest)request); + case 9: + return impl.moveServersAndTables(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest)request); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest.getDefaultInstance(); + case 8: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest.getDefaultInstance(); + case 9: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse.getDefaultInstance(); + case 
7: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse.getDefaultInstance(); + case 8: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.getDefaultInstance(); + case 9: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + }; + } + + /** + * rpc GetRSGroupInfo(.hbase.pb.GetRSGroupInfoRequest) returns (.hbase.pb.GetRSGroupInfoResponse); + */ + public abstract void getRSGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetRSGroupInfoOfTable(.hbase.pb.GetRSGroupInfoOfTableRequest) returns (.hbase.pb.GetRSGroupInfoOfTableResponse); + */ + public abstract void getRSGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc GetRSGroupInfoOfServer(.hbase.pb.GetRSGroupInfoOfServerRequest) returns (.hbase.pb.GetRSGroupInfoOfServerResponse); + */ + public abstract void getRSGroupInfoOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc MoveServers(.hbase.pb.MoveServersRequest) returns (.hbase.pb.MoveServersResponse); + */ + public abstract void moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc MoveTables(.hbase.pb.MoveTablesRequest) returns (.hbase.pb.MoveTablesResponse); + */ + public abstract void moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc AddRSGroup(.hbase.pb.AddRSGroupRequest) returns (.hbase.pb.AddRSGroupResponse); + */ + public abstract void addRSGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc RemoveRSGroup(.hbase.pb.RemoveRSGroupRequest) returns (.hbase.pb.RemoveRSGroupResponse); + */ + public abstract void removeRSGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc BalanceRSGroup(.hbase.pb.BalanceRSGroupRequest) returns (.hbase.pb.BalanceRSGroupResponse); + */ + public abstract void balanceRSGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc ListRSGroupInfos(.hbase.pb.ListRSGroupInfosRequest) returns (.hbase.pb.ListRSGroupInfosResponse); + */ + public abstract void listRSGroupInfos( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest request, + com.google.protobuf.RpcCallback done); + + /** + * rpc 
MoveServersAndTables(.hbase.pb.MoveServersAndTablesRequest) returns (.hbase.pb.MoveServersAndTablesResponse); + */ + public abstract void moveServersAndTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest request, + com.google.protobuf.RpcCallback done); + + public static final + com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.getDescriptor().getServices().get(0); + } + public final com.google.protobuf.Descriptors.ServiceDescriptor + getDescriptorForType() { + return getDescriptor(); + } + + public final void callMethod( + com.google.protobuf.Descriptors.MethodDescriptor method, + com.google.protobuf.RpcController controller, + com.google.protobuf.Message request, + com.google.protobuf.RpcCallback< + com.google.protobuf.Message> done) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.callMethod() given method descriptor for wrong " + + "service type."); + } + switch(method.getIndex()) { + case 0: + this.getRSGroupInfo(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 1: + this.getRSGroupInfoOfTable(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 2: + this.getRSGroupInfoOfServer(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 3: + this.moveServers(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 4: + this.moveTables(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 5: + this.addRSGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 6: + this.removeRSGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 7: + this.balanceRSGroup(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 8: + this.listRSGroupInfos(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 9: + this.moveServersAndTables(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getRequestPrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + 
"Service.getRequestPrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest.getDefaultInstance(); + case 8: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest.getDefaultInstance(); + case 9: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public final com.google.protobuf.Message + getResponsePrototype( + com.google.protobuf.Descriptors.MethodDescriptor method) { + if (method.getService() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "Service.getResponsePrototype() given method " + + "descriptor for wrong service type."); + } + switch(method.getIndex()) { + case 0: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse.getDefaultInstance(); + case 1: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse.getDefaultInstance(); + case 2: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse.getDefaultInstance(); + case 3: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse.getDefaultInstance(); + case 4: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse.getDefaultInstance(); + case 5: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse.getDefaultInstance(); + case 6: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse.getDefaultInstance(); + case 7: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse.getDefaultInstance(); + case 8: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.getDefaultInstance(); + case 9: + return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance(); + default: + throw new java.lang.AssertionError("Can't get here."); + } + } + + public static Stub newStub( + com.google.protobuf.RpcChannel channel) { + return new Stub(channel); + } + + public static final class Stub extends org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService implements Interface { + private Stub(com.google.protobuf.RpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.RpcChannel 
channel; + + public com.google.protobuf.RpcChannel getChannel() { + return channel; + } + + public void getRSGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse.getDefaultInstance())); + } + + public void getRSGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse.getDefaultInstance())); + } + + public void getRSGroupInfoOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(2), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse.getDefaultInstance())); + } + + public void moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(3), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse.getDefaultInstance())); + } + + public void moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse.getDefaultInstance())); + } + + public void addRSGroup( + 
com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(5), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse.getDefaultInstance())); + } + + public void removeRSGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse.getDefaultInstance())); + } + + public void balanceRSGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(7), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse.getDefaultInstance())); + } + + public void listRSGroupInfos( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(8), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.getDefaultInstance())); + } + + public void moveServersAndTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(9), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.class, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance())); + } + } + + public static BlockingInterface newBlockingStub( + com.google.protobuf.BlockingRpcChannel channel) { + return new BlockingStub(channel); + } + + public interface 
BlockingInterface { + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse getRSGroupInfo( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse getRSGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse getRSGroupInfoOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse addRSGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse removeRSGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse balanceRSGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse listRSGroupInfos( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest request) + throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse moveServersAndTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest request) + throws com.google.protobuf.ServiceException; + } + + private static final class BlockingStub implements BlockingInterface { + private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) { + this.channel = channel; + } + + private final com.google.protobuf.BlockingRpcChannel channel; + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse getRSGroupInfo( + com.google.protobuf.RpcController controller, + 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(0), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse getRSGroupInfoOfTable( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(1), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse getRSGroupInfoOfServer( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(2), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse moveServers( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(3), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse moveTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(4), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse addRSGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(5), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse.getDefaultInstance()); + } + + + public 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse removeRSGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(6), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse balanceRSGroup( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(7), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse listRSGroupInfos( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(8), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.getDefaultInstance()); + } + + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse moveServersAndTables( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(9), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance()); + } + + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RSGroupAdminService) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ListTablesOfRSGroupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_ListTablesOfRSGroupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ListTablesOfRSGroupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_ListTablesOfRSGroupResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetRSGroupInfoRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetRSGroupInfoRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetRSGroupInfoResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + 
internal_static_hbase_pb_GetRSGroupInfoResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetRSGroupInfoOfTableRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetRSGroupInfoOfTableRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetRSGroupInfoOfTableResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetRSGroupInfoOfTableResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_MoveServersRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_MoveServersRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_MoveServersResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_MoveServersResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_MoveTablesRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_MoveTablesRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_MoveTablesResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_MoveTablesResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_AddRSGroupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_AddRSGroupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_AddRSGroupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_AddRSGroupResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RemoveRSGroupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_RemoveRSGroupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RemoveRSGroupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_RemoveRSGroupResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_BalanceRSGroupRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_BalanceRSGroupRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_BalanceRSGroupResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_BalanceRSGroupResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ListRSGroupInfosRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_ListRSGroupInfosRequest_fieldAccessorTable; + private static 
com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_ListRSGroupInfosResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_ListRSGroupInfosResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetRSGroupInfoOfServerRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetRSGroupInfoOfServerRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_GetRSGroupInfoOfServerResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_GetRSGroupInfoOfServerResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_MoveServersAndTablesRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_MoveServersAndTablesRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_MoveServersAndTablesResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_MoveServersAndTablesResponse_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\022RSGroupAdmin.proto\022\010hbase.pb\032\013Table.pr" + + "oto\032\013HBase.proto\032\rRSGroup.proto\"4\n\032ListT" + + "ablesOfRSGroupRequest\022\026\n\016r_s_group_name\030" + + "\001 \002(\t\"F\n\033ListTablesOfRSGroupResponse\022\'\n\n" + + "table_name\030\001 \003(\0132\023.hbase.pb.TableName\"/\n" + + "\025GetRSGroupInfoRequest\022\026\n\016r_s_group_name" + + "\030\001 \002(\t\"G\n\026GetRSGroupInfoResponse\022-\n\016r_s_" + + "group_info\030\001 \001(\0132\025.hbase.pb.RSGroupInfo\"" + + "G\n\034GetRSGroupInfoOfTableRequest\022\'\n\ntable" + + "_name\030\001 \002(\0132\023.hbase.pb.TableName\"N\n\035GetR", + "SGroupInfoOfTableResponse\022-\n\016r_s_group_i" + + "nfo\030\001 \001(\0132\025.hbase.pb.RSGroupInfo\"Q\n\022Move" + + "ServersRequest\022\024\n\014target_group\030\001 \002(\t\022%\n\007" + + "servers\030\003 \003(\0132\024.hbase.pb.ServerName\"\025\n\023M" + + "oveServersResponse\"R\n\021MoveTablesRequest\022" + + "\024\n\014target_group\030\001 \002(\t\022\'\n\ntable_name\030\002 \003(" + + "\0132\023.hbase.pb.TableName\"\024\n\022MoveTablesResp" + + "onse\"+\n\021AddRSGroupRequest\022\026\n\016r_s_group_n" + + "ame\030\001 \002(\t\"\024\n\022AddRSGroupResponse\".\n\024Remov" + + "eRSGroupRequest\022\026\n\016r_s_group_name\030\001 \002(\t\"", + "\027\n\025RemoveRSGroupResponse\"/\n\025BalanceRSGro" + + "upRequest\022\026\n\016r_s_group_name\030\001 \002(\t\",\n\026Bal" + + "anceRSGroupResponse\022\022\n\nbalanceRan\030\001 \002(\010\"" + + "\031\n\027ListRSGroupInfosRequest\"I\n\030ListRSGrou" + + "pInfosResponse\022-\n\016r_s_group_info\030\001 \003(\0132\025" + + ".hbase.pb.RSGroupInfo\"E\n\035GetRSGroupInfoO" + + "fServerRequest\022$\n\006server\030\002 \002(\0132\024.hbase.p" + + "b.ServerName\"O\n\036GetRSGroupInfoOfServerRe" + + "sponse\022-\n\016r_s_group_info\030\001 \001(\0132\025.hbase.p" + + "b.RSGroupInfo\"\203\001\n\033MoveServersAndTablesRe", + 
"quest\022\024\n\014target_group\030\001 \002(\t\022%\n\007servers\030\002" + + " \003(\0132\024.hbase.pb.ServerName\022\'\n\ntable_name" + + "\030\003 \003(\0132\023.hbase.pb.TableName\"\036\n\034MoveServe" + + "rsAndTablesResponse2\210\007\n\023RSGroupAdminServ" + + "ice\022S\n\016GetRSGroupInfo\022\037.hbase.pb.GetRSGr" + + "oupInfoRequest\032 .hbase.pb.GetRSGroupInfo" + + "Response\022h\n\025GetRSGroupInfoOfTable\022&.hbas" + + "e.pb.GetRSGroupInfoOfTableRequest\032\'.hbas" + + "e.pb.GetRSGroupInfoOfTableResponse\022k\n\026Ge" + + "tRSGroupInfoOfServer\022\'.hbase.pb.GetRSGro", + "upInfoOfServerRequest\032(.hbase.pb.GetRSGr" + + "oupInfoOfServerResponse\022J\n\013MoveServers\022\034" + + ".hbase.pb.MoveServersRequest\032\035.hbase.pb." + + "MoveServersResponse\022G\n\nMoveTables\022\033.hbas" + + "e.pb.MoveTablesRequest\032\034.hbase.pb.MoveTa" + + "blesResponse\022G\n\nAddRSGroup\022\033.hbase.pb.Ad" + + "dRSGroupRequest\032\034.hbase.pb.AddRSGroupRes" + + "ponse\022P\n\rRemoveRSGroup\022\036.hbase.pb.Remove" + + "RSGroupRequest\032\037.hbase.pb.RemoveRSGroupR" + + "esponse\022S\n\016BalanceRSGroup\022\037.hbase.pb.Bal", + "anceRSGroupRequest\032 .hbase.pb.BalanceRSG" + + "roupResponse\022Y\n\020ListRSGroupInfos\022!.hbase" + + ".pb.ListRSGroupInfosRequest\032\".hbase.pb.L" + + "istRSGroupInfosResponse\022e\n\024MoveServersAn" + + "dTables\022%.hbase.pb.MoveServersAndTablesR" + + "equest\032&.hbase.pb.MoveServersAndTablesRe" + + "sponseBH\n*org.apache.hadoop.hbase.protob" + + "uf.generatedB\022RSGroupAdminProtosH\001\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_hbase_pb_ListTablesOfRSGroupRequest_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_hbase_pb_ListTablesOfRSGroupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_ListTablesOfRSGroupRequest_descriptor, + new java.lang.String[] { "RSGroupName", }); + internal_static_hbase_pb_ListTablesOfRSGroupResponse_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_hbase_pb_ListTablesOfRSGroupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_ListTablesOfRSGroupResponse_descriptor, + new java.lang.String[] { "TableName", }); + internal_static_hbase_pb_GetRSGroupInfoRequest_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_hbase_pb_GetRSGroupInfoRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetRSGroupInfoRequest_descriptor, + new java.lang.String[] { "RSGroupName", }); + internal_static_hbase_pb_GetRSGroupInfoResponse_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_hbase_pb_GetRSGroupInfoResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetRSGroupInfoResponse_descriptor, + new java.lang.String[] { "RSGroupInfo", }); + internal_static_hbase_pb_GetRSGroupInfoOfTableRequest_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_hbase_pb_GetRSGroupInfoOfTableRequest_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetRSGroupInfoOfTableRequest_descriptor, + new java.lang.String[] { "TableName", }); + internal_static_hbase_pb_GetRSGroupInfoOfTableResponse_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_hbase_pb_GetRSGroupInfoOfTableResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetRSGroupInfoOfTableResponse_descriptor, + new java.lang.String[] { "RSGroupInfo", }); + internal_static_hbase_pb_MoveServersRequest_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_hbase_pb_MoveServersRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_MoveServersRequest_descriptor, + new java.lang.String[] { "TargetGroup", "Servers", }); + internal_static_hbase_pb_MoveServersResponse_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_hbase_pb_MoveServersResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_MoveServersResponse_descriptor, + new java.lang.String[] { }); + internal_static_hbase_pb_MoveTablesRequest_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_hbase_pb_MoveTablesRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_MoveTablesRequest_descriptor, + new java.lang.String[] { "TargetGroup", "TableName", }); + internal_static_hbase_pb_MoveTablesResponse_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_hbase_pb_MoveTablesResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_MoveTablesResponse_descriptor, + new java.lang.String[] { }); + internal_static_hbase_pb_AddRSGroupRequest_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_hbase_pb_AddRSGroupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_AddRSGroupRequest_descriptor, + new java.lang.String[] { "RSGroupName", }); + internal_static_hbase_pb_AddRSGroupResponse_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_hbase_pb_AddRSGroupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_AddRSGroupResponse_descriptor, + new java.lang.String[] { }); + internal_static_hbase_pb_RemoveRSGroupRequest_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_hbase_pb_RemoveRSGroupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_RemoveRSGroupRequest_descriptor, + new java.lang.String[] { "RSGroupName", }); + internal_static_hbase_pb_RemoveRSGroupResponse_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_hbase_pb_RemoveRSGroupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_RemoveRSGroupResponse_descriptor, + new java.lang.String[] { }); + internal_static_hbase_pb_BalanceRSGroupRequest_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_hbase_pb_BalanceRSGroupRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_BalanceRSGroupRequest_descriptor, + new java.lang.String[] { 
"RSGroupName", }); + internal_static_hbase_pb_BalanceRSGroupResponse_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_hbase_pb_BalanceRSGroupResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_BalanceRSGroupResponse_descriptor, + new java.lang.String[] { "BalanceRan", }); + internal_static_hbase_pb_ListRSGroupInfosRequest_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_hbase_pb_ListRSGroupInfosRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_ListRSGroupInfosRequest_descriptor, + new java.lang.String[] { }); + internal_static_hbase_pb_ListRSGroupInfosResponse_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_hbase_pb_ListRSGroupInfosResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_ListRSGroupInfosResponse_descriptor, + new java.lang.String[] { "RSGroupInfo", }); + internal_static_hbase_pb_GetRSGroupInfoOfServerRequest_descriptor = + getDescriptor().getMessageTypes().get(18); + internal_static_hbase_pb_GetRSGroupInfoOfServerRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetRSGroupInfoOfServerRequest_descriptor, + new java.lang.String[] { "Server", }); + internal_static_hbase_pb_GetRSGroupInfoOfServerResponse_descriptor = + getDescriptor().getMessageTypes().get(19); + internal_static_hbase_pb_GetRSGroupInfoOfServerResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_GetRSGroupInfoOfServerResponse_descriptor, + new java.lang.String[] { "RSGroupInfo", }); + internal_static_hbase_pb_MoveServersAndTablesRequest_descriptor = + getDescriptor().getMessageTypes().get(20); + internal_static_hbase_pb_MoveServersAndTablesRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_MoveServersAndTablesRequest_descriptor, + new java.lang.String[] { "TargetGroup", "Servers", "TableName", }); + internal_static_hbase_pb_MoveServersAndTablesResponse_descriptor = + getDescriptor().getMessageTypes().get(21); + internal_static_hbase_pb_MoveServersAndTablesResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_MoveServersAndTablesResponse_descriptor, + new java.lang.String[] { }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.TableProtos.getDescriptor(), + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java new file mode 100644 index 00000000000..5f5eb3b17e6 --- /dev/null +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupProtos.java @@ -0,0 +1,1332 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: RSGroup.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class RSGroupProtos { + private RSGroupProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + public interface RSGroupInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string name = 1; + /** + * required string name = 1; + */ + boolean hasName(); + /** + * required string name = 1; + */ + java.lang.String getName(); + /** + * required string name = 1; + */ + com.google.protobuf.ByteString + getNameBytes(); + + // repeated .hbase.pb.ServerName servers = 4; + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + java.util.List + getServersList(); + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index); + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + int getServersCount(); + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + java.util.List + getServersOrBuilderList(); + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder( + int index); + + // repeated .hbase.pb.TableName tables = 3; + /** + * repeated .hbase.pb.TableName tables = 3; + */ + java.util.List + getTablesList(); + /** + * repeated .hbase.pb.TableName tables = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTables(int index); + /** + * repeated .hbase.pb.TableName tables = 3; + */ + int getTablesCount(); + /** + * repeated .hbase.pb.TableName tables = 3; + */ + java.util.List + getTablesOrBuilderList(); + /** + * repeated .hbase.pb.TableName tables = 3; + */ + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTablesOrBuilder( + int index); + } + /** + * Protobuf type {@code hbase.pb.RSGroupInfo} + */ + public static final class RSGroupInfo extends + com.google.protobuf.GeneratedMessage + implements RSGroupInfoOrBuilder { + // Use RSGroupInfo.newBuilder() to construct. 
+ private RSGroupInfo(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private RSGroupInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RSGroupInfo defaultInstance; + public static RSGroupInfo getDefaultInstance() { + return defaultInstance; + } + + public RSGroupInfo getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private RSGroupInfo( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + name_ = input.readBytes(); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + tables_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + tables_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry)); + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + servers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + } + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = java.util.Collections.unmodifiableList(servers_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_hbase_pb_RSGroupInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_hbase_pb_RSGroupInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RSGroupInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RSGroupInfo(input, 
extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string name = 1; + public static final int NAME_FIELD_NUMBER = 1; + private java.lang.Object name_; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + name_ = s; + } + return s; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // repeated .hbase.pb.ServerName servers = 4; + public static final int SERVERS_FIELD_NUMBER = 4; + private java.util.List servers_; + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public java.util.List getServersList() { + return servers_; + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public java.util.List + getServersOrBuilderList() { + return servers_; + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public int getServersCount() { + return servers_.size(); + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index) { + return servers_.get(index); + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder( + int index) { + return servers_.get(index); + } + + // repeated .hbase.pb.TableName tables = 3; + public static final int TABLES_FIELD_NUMBER = 3; + private java.util.List tables_; + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public java.util.List getTablesList() { + return tables_; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public java.util.List + getTablesOrBuilderList() { + return tables_; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public int getTablesCount() { + return tables_.size(); + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTables(int index) { + return tables_.get(index); + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + return tables_.get(index); + } + + private void initFields() { + name_ = ""; + servers_ = java.util.Collections.emptyList(); + tables_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasName()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getServersCount(); i++) { + if (!getServers(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + for (int i = 0; i < getTablesCount(); i++) { 
+ if (!getTables(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getNameBytes()); + } + for (int i = 0; i < tables_.size(); i++) { + output.writeMessage(3, tables_.get(i)); + } + for (int i = 0; i < servers_.size(); i++) { + output.writeMessage(4, servers_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getNameBytes()); + } + for (int i = 0; i < tables_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(3, tables_.get(i)); + } + for (int i = 0; i < servers_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(4, servers_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo) obj; + + boolean result = true; + result = result && (hasName() == other.hasName()); + if (hasName()) { + result = result && getName() + .equals(other.getName()); + } + result = result && getServersList() + .equals(other.getServersList()); + result = result && getTablesList() + .equals(other.getTablesList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasName()) { + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + } + if (getServersCount() > 0) { + hash = (37 * hash) + SERVERS_FIELD_NUMBER; + hash = (53 * hash) + getServersList().hashCode(); + } + if (getTablesCount() > 0) { + hash = (37 * hash) + TABLES_FIELD_NUMBER; + hash = (53 * hash) + getTablesList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.RSGroupInfo} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfoOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_hbase_pb_RSGroupInfo_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_hbase_pb_RSGroupInfo_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + 
private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getServersFieldBuilder(); + getTablesFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + name_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + if (serversBuilder_ == null) { + servers_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + serversBuilder_.clear(); + } + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + tablesBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.internal_static_hbase_pb_RSGroupInfo_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo build() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.name_ = name_; + if (serversBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = java.util.Collections.unmodifiableList(servers_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.servers_ = servers_; + } else { + result.servers_ = serversBuilder_.build(); + } + if (tablesBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + tables_ = java.util.Collections.unmodifiableList(tables_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.tables_ = tables_; + } else { + result.tables_ = tablesBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo.getDefaultInstance()) return this; + if (other.hasName()) { + bitField0_ |= 0x00000001; + name_ = other.name_; + onChanged(); + } + if (serversBuilder_ == null) { + if (!other.servers_.isEmpty()) { + if (servers_.isEmpty()) { + servers_ = other.servers_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureServersIsMutable(); + 
servers_.addAll(other.servers_); + } + onChanged(); + } + } else { + if (!other.servers_.isEmpty()) { + if (serversBuilder_.isEmpty()) { + serversBuilder_.dispose(); + serversBuilder_ = null; + servers_ = other.servers_; + bitField0_ = (bitField0_ & ~0x00000002); + serversBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getServersFieldBuilder() : null; + } else { + serversBuilder_.addAllMessages(other.servers_); + } + } + } + if (tablesBuilder_ == null) { + if (!other.tables_.isEmpty()) { + if (tables_.isEmpty()) { + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureTablesIsMutable(); + tables_.addAll(other.tables_); + } + onChanged(); + } + } else { + if (!other.tables_.isEmpty()) { + if (tablesBuilder_.isEmpty()) { + tablesBuilder_.dispose(); + tablesBuilder_ = null; + tables_ = other.tables_; + bitField0_ = (bitField0_ & ~0x00000004); + tablesBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? + getTablesFieldBuilder() : null; + } else { + tablesBuilder_.addAllMessages(other.tables_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasName()) { + + return false; + } + for (int i = 0; i < getServersCount(); i++) { + if (!getServers(i).isInitialized()) { + + return false; + } + } + for (int i = 0; i < getTablesCount(); i++) { + if (!getTables(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos.RSGroupInfo) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string name = 1; + private java.lang.Object name_ = ""; + /** + * required string name = 1; + */ + public boolean hasName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string name = 1; + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string name = 1; + */ + public com.google.protobuf.ByteString + getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string name = 1; + */ + public Builder setName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder clearName() { + bitField0_ = (bitField0_ & ~0x00000001); + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * required string name = 1; + */ + public Builder setNameBytes( + 
com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + name_ = value; + onChanged(); + return this; + } + + // repeated .hbase.pb.ServerName servers = 4; + private java.util.List servers_ = + java.util.Collections.emptyList(); + private void ensureServersIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + servers_ = new java.util.ArrayList(servers_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serversBuilder_; + + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public java.util.List getServersList() { + if (serversBuilder_ == null) { + return java.util.Collections.unmodifiableList(servers_); + } else { + return serversBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public int getServersCount() { + if (serversBuilder_ == null) { + return servers_.size(); + } else { + return serversBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index) { + if (serversBuilder_ == null) { + return servers_.get(index); + } else { + return serversBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public Builder setServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.set(index, value); + onChanged(); + } else { + serversBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public Builder setServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.set(index, builderForValue.build()); + onChanged(); + } else { + serversBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public Builder addServers(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(value); + onChanged(); + } else { + serversBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public Builder addServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) { + if (serversBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureServersIsMutable(); + servers_.add(index, value); + onChanged(); + } else { + serversBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public Builder addServers( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.add(builderForValue.build()); + onChanged(); + } else { + serversBuilder_.addMessage(builderForValue.build()); + } + return this; 
+ } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public Builder addServers( + int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.add(index, builderForValue.build()); + onChanged(); + } else { + serversBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public Builder addAllServers( + java.lang.Iterable values) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + super.addAll(values, servers_); + onChanged(); + } else { + serversBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public Builder clearServers() { + if (serversBuilder_ == null) { + servers_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + serversBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public Builder removeServers(int index) { + if (serversBuilder_ == null) { + ensureServersIsMutable(); + servers_.remove(index); + onChanged(); + } else { + serversBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServersBuilder( + int index) { + return getServersFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder( + int index) { + if (serversBuilder_ == null) { + return servers_.get(index); } else { + return serversBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public java.util.List + getServersOrBuilderList() { + if (serversBuilder_ != null) { + return serversBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(servers_); + } + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServersBuilder() { + return getServersFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServersBuilder( + int index) { + return getServersFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.ServerName servers = 4; + */ + public java.util.List + getServersBuilderList() { + return getServersFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> + getServersFieldBuilder() { + if (serversBuilder_ == null) { + serversBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>( + servers_, + ((bitField0_ & 
0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + servers_ = null; + } + return serversBuilder_; + } + + // repeated .hbase.pb.TableName tables = 3; + private java.util.List tables_ = + java.util.Collections.emptyList(); + private void ensureTablesIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + tables_ = new java.util.ArrayList(tables_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tablesBuilder_; + + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public java.util.List getTablesList() { + if (tablesBuilder_ == null) { + return java.util.Collections.unmodifiableList(tables_); + } else { + return tablesBuilder_.getMessageList(); + } + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public int getTablesCount() { + if (tablesBuilder_ == null) { + return tables_.size(); + } else { + return tablesBuilder_.getCount(); + } + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTables(int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); + } else { + return tablesBuilder_.getMessage(index); + } + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.set(index, value); + onChanged(); + } else { + tablesBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public Builder setTables( + int index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.set(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public Builder addTables(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(value); + onChanged(); + } else { + tablesBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public Builder addTables( + int index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) { + if (tablesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTablesIsMutable(); + tables_.add(index, value); + onChanged(); + } else { + tablesBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public Builder addTables( + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public Builder addTables( + int index, 
org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.add(index, builderForValue.build()); + onChanged(); + } else { + tablesBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public Builder addAllTables( + java.lang.Iterable values) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + super.addAll(values, tables_); + onChanged(); + } else { + tablesBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public Builder clearTables() { + if (tablesBuilder_ == null) { + tables_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + tablesBuilder_.clear(); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public Builder removeTables(int index) { + if (tablesBuilder_ == null) { + ensureTablesIsMutable(); + tables_.remove(index); + onChanged(); + } else { + tablesBuilder_.remove(index); + } + return this; + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTablesBuilder( + int index) { + return getTablesFieldBuilder().getBuilder(index); + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTablesOrBuilder( + int index) { + if (tablesBuilder_ == null) { + return tables_.get(index); } else { + return tablesBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public java.util.List + getTablesOrBuilderList() { + if (tablesBuilder_ != null) { + return tablesBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tables_); + } + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder addTablesBuilder() { + return getTablesFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder addTablesBuilder( + int index) { + return getTablesFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()); + } + /** + * repeated .hbase.pb.TableName tables = 3; + */ + public java.util.List + getTablesBuilderList() { + return getTablesFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> + getTablesFieldBuilder() { + if (tablesBuilder_ == null) { + tablesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>( + tables_, + ((bitField0_ & 0x00000004) == 0x00000004), + getParentForChildren(), + isClean()); + tables_ = null; + } + return tablesBuilder_; + } + + // 
@@protoc_insertion_point(builder_scope:hbase.pb.RSGroupInfo) + } + + static { + defaultInstance = new RSGroupInfo(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.RSGroupInfo) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_RSGroupInfo_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_RSGroupInfo_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\rRSGroup.proto\022\010hbase.pb\032\013Table.proto\032\013" + + "HBase.proto\"g\n\013RSGroupInfo\022\014\n\004name\030\001 \002(\t" + + "\022%\n\007servers\030\004 \003(\0132\024.hbase.pb.ServerName\022" + + "#\n\006tables\030\003 \003(\0132\023.hbase.pb.TableNameBC\n*" + + "org.apache.hadoop.hbase.protobuf.generat" + + "edB\rRSGroupProtosH\001\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_hbase_pb_RSGroupInfo_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_hbase_pb_RSGroupInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_RSGroupInfo_descriptor, + new java.lang.String[] { "Name", "Servers", "Tables", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor + .internalBuildGeneratedFileFrom(descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + org.apache.hadoop.hbase.protobuf.generated.TableProtos.getDescriptor(), + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(), + }, assigner); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/hbase-protocol/src/main/protobuf/RSGroup.proto b/hbase-protocol/src/main/protobuf/RSGroup.proto new file mode 100644 index 00000000000..b88e82cfea9 --- /dev/null +++ b/hbase-protocol/src/main/protobuf/RSGroup.proto @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package hbase.pb; + +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "RSGroupProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "Table.proto"; +import "HBase.proto"; + +message RSGroupInfo { + required string name = 1; + repeated ServerName servers = 4; + repeated TableName tables = 3; +} + diff --git a/hbase-protocol/src/main/protobuf/RSGroupAdmin.proto b/hbase-protocol/src/main/protobuf/RSGroupAdmin.proto new file mode 100644 index 00000000000..65da657e092 --- /dev/null +++ b/hbase-protocol/src/main/protobuf/RSGroupAdmin.proto @@ -0,0 +1,149 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package hbase.pb; + +option java_package = "org.apache.hadoop.hbase.protobuf.generated"; +option java_outer_classname = "RSGroupAdminProtos"; +option java_generic_services = true; +option java_generate_equals_and_hash = true; +option optimize_for = SPEED; + +import "Table.proto"; +import "HBase.proto"; +import "RSGroup.proto"; + +/** Group level protobufs */ + +message ListTablesOfRSGroupRequest { + required string r_s_group_name = 1; +} + +message ListTablesOfRSGroupResponse { + repeated TableName table_name = 1; +} + +message GetRSGroupInfoRequest { + required string r_s_group_name = 1; +} + +message GetRSGroupInfoResponse { + optional RSGroupInfo r_s_group_info = 1; +} + +message GetRSGroupInfoOfTableRequest { + required TableName table_name = 1; +} + +message GetRSGroupInfoOfTableResponse { + optional RSGroupInfo r_s_group_info = 1; +} + +message MoveServersRequest { + required string target_group = 1; + repeated ServerName servers = 3; +} + +message MoveServersResponse { +} + +message MoveTablesRequest { + required string target_group = 1; + repeated TableName table_name = 2; +} + +message MoveTablesResponse { +} + +message AddRSGroupRequest { + required string r_s_group_name = 1; +} + +message AddRSGroupResponse { +} + +message RemoveRSGroupRequest { + required string r_s_group_name = 1; +} + +message RemoveRSGroupResponse { +} + +message BalanceRSGroupRequest { + required string r_s_group_name = 1; +} + +message BalanceRSGroupResponse { + required bool balanceRan = 1; +} + +message ListRSGroupInfosRequest { +} + +message ListRSGroupInfosResponse { + repeated RSGroupInfo r_s_group_info = 1; +} + +message GetRSGroupInfoOfServerRequest { + required ServerName server = 2; +} + +message GetRSGroupInfoOfServerResponse { + optional RSGroupInfo r_s_group_info = 1; +} + +message MoveServersAndTablesRequest { + required string target_group = 1; + repeated ServerName servers = 2; + repeated TableName table_name = 3; +} + +message MoveServersAndTablesResponse { +} + +service RSGroupAdminService { + rpc 
GetRSGroupInfo(GetRSGroupInfoRequest) + returns (GetRSGroupInfoResponse); + + rpc GetRSGroupInfoOfTable(GetRSGroupInfoOfTableRequest) + returns (GetRSGroupInfoOfTableResponse); + + rpc GetRSGroupInfoOfServer(GetRSGroupInfoOfServerRequest) + returns (GetRSGroupInfoOfServerResponse); + + rpc MoveServers(MoveServersRequest) + returns (MoveServersResponse); + + rpc MoveTables(MoveTablesRequest) + returns (MoveTablesResponse); + + rpc AddRSGroup(AddRSGroupRequest) + returns (AddRSGroupResponse); + + rpc RemoveRSGroup(RemoveRSGroupRequest) + returns (RemoveRSGroupResponse); + + rpc BalanceRSGroup(BalanceRSGroupRequest) + returns (BalanceRSGroupResponse); + + rpc ListRSGroupInfos(ListRSGroupInfosRequest) + returns (ListRSGroupInfosResponse); + + rpc MoveServersAndTables(MoveServersAndTablesRequest) + returns (MoveServersAndTablesResponse); +} diff --git a/hbase-rsgroup/pom.xml b/hbase-rsgroup/pom.xml new file mode 100644 index 00000000000..ac1d6b33cf9 --- /dev/null +++ b/hbase-rsgroup/pom.xml @@ -0,0 +1,278 @@ + + + + 4.0.0 + + hbase + org.apache.hbase + 1.4.0-SNAPSHOT + .. + + + hbase-rsgroup + Apache HBase - RSGroup + Regionserver Groups for HBase + + + + + org.apache.maven.plugins + maven-site-plugin + + true + + + + + maven-assembly-plugin + ${maven.assembly.version} + + true + + + + maven-surefire-plugin + + + + secondPartTestsExecution + test + + test + + + true + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + + + + + org.eclipse.m2e + lifecycle-mapping + 1.0.0 + + + + + + org.apache.maven.plugins + maven-compiler-plugin + [3.2,) + + compile + + + + + + + + + + + + + + + + + + org.apache.hbase + hbase-annotations + + + jdk.tools + jdk.tools + + + + + org.apache.hbase + hbase-annotations + test-jar + test + + + org.apache.hbase + hbase-client + + + org.apache.hbase + hbase-server + + + org.apache.hbase + hbase-common + + + org.apache.hbase + hbase-common + test-jar + + + org.apache.hbase + hbase-protocol + + + org.apache.hbase + hbase-testing-util + test + + + + + + + skipRSGroupTests + + + skipRSGroupTests + + + + true + + + + + hadoop-1.1 + + + + hadoop.profile1.1 + + + + + org.apache.hadoop + hadoop-core + + + + + + + hadoop-2.0 + + + + !hadoop.profile + + + + + org.apache.hadoop + hadoop-auth + + + org.apache.hadoop + hadoop-common + + + com.github.stephenc.findbugs + findbugs-annotations + + + net.java.dev.jets3t + jets3t + + + javax.servlet.jsp + jsp-api + + + org.mortbay.jetty + jetty + + + com.sun.jersey + jersey-server + + + com.sun.jersey + jersey-core + + + com.sun.jersey + jersey-json + + + javax.servlet + servlet-api + + + tomcat + jasper-compiler + + + tomcat + jasper-runtime + + + + + + + + + hadoop-3.0 + + + hadoop.profile + 3.0 + + + + 3.0-SNAPSHOT + + + + org.apache.hadoop + hadoop-auth + + + org.apache.hadoop + hadoop-common + + + + + diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java new file mode 100644 index 00000000000..f465f834754 --- /dev/null +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rsgroup; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.net.Address; + +/** + * Group user API interface used between client and server. + */ +@InterfaceAudience.Private +public interface RSGroupAdmin extends Closeable { + /** + * Gets {@code RSGroupInfo} for given group name. + */ + RSGroupInfo getRSGroupInfo(String groupName) throws IOException; + + /** + * Gets {@code RSGroupInfo} for the given table's group. + */ + RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException; + + /** + * Move given set of servers to the specified target RegionServer group. + */ + void moveServers(Set
<Address> servers, String targetGroup) throws IOException;
+
+  /**
+   * Move given set of tables to the specified target RegionServer group.
+   * This will unassign all of a table's regions so they can be reassigned to the correct group.
+   */
+  void moveTables(Set<TableName> tables, String targetGroup) throws IOException;
+
+  /**
+   * Creates a new RegionServer group with the given name.
+   */
+  void addRSGroup(String groupName) throws IOException;
+
+  /**
+   * Removes the RegionServer group associated with the given name.
+   */
+  void removeRSGroup(String groupName) throws IOException;
+
+  /**
+   * Balance regions in the given RegionServer group.
+   *
+   * @return boolean whether the balancer ran
+   */
+  boolean balanceRSGroup(String groupName) throws IOException;
+
+  /**
+   * Lists the current set of RegionServer groups.
+   */
+  List<RSGroupInfo> listRSGroups() throws IOException;
+
+  /**
+   * Retrieves the RSGroupInfo a server is affiliated to.
+   * @param hostPort HostPort to get RSGroupInfo for
+   */
+  RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException;
+
+  /**
+   * Move given set of servers and tables to the specified target RegionServer group.
+   * @param servers set of servers to move
+   * @param tables set of tables to move
+   * @param targetGroup the target group name
+   * @throws IOException if the move fails
+   */
+  void moveServersAndTables(Set<Address>
servers, Set tables, + String targetGroup) throws IOException; +} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java new file mode 100644 index 00000000000..8d9df44873f --- /dev/null +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java @@ -0,0 +1,212 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rsgroup; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; + +import com.google.common.collect.Sets; +import com.google.protobuf.ServiceException; + +/** + * Client used for managing region server group information. 
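+ * <p>A minimal usage sketch; the group name, server address and table below are
+ * hypothetical, and {@code conn} is assumed to be an open {@code Connection}:
+ * <pre>
+ *   RSGroupAdmin admin = new RSGroupAdminClient(conn);
+ *   admin.addRSGroup("web_tier");
+ *   admin.moveServers(
+ *       Sets.newHashSet(Address.fromParts("rs1.example.com", 16020)), "web_tier");
+ *   admin.moveTables(
+ *       Sets.newHashSet(TableName.valueOf("t1")), "web_tier");
+ *   boolean ran = admin.balanceRSGroup("web_tier");
+ * </pre>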
+ */ +@InterfaceAudience.Private +class RSGroupAdminClient implements RSGroupAdmin { + private RSGroupAdminService.BlockingInterface stub; + + public RSGroupAdminClient(Connection conn) throws IOException { + stub = RSGroupAdminService.newBlockingStub(conn.getAdmin().coprocessorService()); + } + + @Override + public RSGroupInfo getRSGroupInfo(String groupName) throws IOException { + try { + GetRSGroupInfoResponse resp = stub.getRSGroupInfo(null, + GetRSGroupInfoRequest.newBuilder().setRSGroupName(groupName).build()); + if(resp.hasRSGroupInfo()) { + return RSGroupProtobufUtil.toGroupInfo(resp.getRSGroupInfo()); + } + return null; + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException { + GetRSGroupInfoOfTableRequest request = GetRSGroupInfoOfTableRequest.newBuilder().setTableName( + ProtobufUtil.toProtoTableName(tableName)).build(); + try { + GetRSGroupInfoOfTableResponse resp = stub.getRSGroupInfoOfTable(null, request); + if (resp.hasRSGroupInfo()) { + return RSGroupProtobufUtil.toGroupInfo(resp.getRSGroupInfo()); + } + return null; + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public void moveServers(Set
<Address>
servers, String targetGroup) throws IOException { + Set hostPorts = Sets.newHashSet(); + for(Address el: servers) { + hostPorts.add(HBaseProtos.ServerName.newBuilder() + .setHostName(el.getHostname()) + .setPort(el.getPort()) + .build()); + } + MoveServersRequest request = MoveServersRequest.newBuilder() + .setTargetGroup(targetGroup) + .addAllServers(hostPorts) + .build(); + try { + stub.moveServers(null, request); + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public void moveTables(Set tables, String targetGroup) throws IOException { + MoveTablesRequest.Builder builder = MoveTablesRequest.newBuilder().setTargetGroup(targetGroup); + for(TableName tableName: tables) { + builder.addTableName(ProtobufUtil.toProtoTableName(tableName)); + } + try { + stub.moveTables(null, builder.build()); + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public void addRSGroup(String groupName) throws IOException { + AddRSGroupRequest request = AddRSGroupRequest.newBuilder().setRSGroupName(groupName).build(); + try { + stub.addRSGroup(null, request); + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public void removeRSGroup(String name) throws IOException { + RemoveRSGroupRequest request = RemoveRSGroupRequest.newBuilder().setRSGroupName(name).build(); + try { + stub.removeRSGroup(null, request); + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public boolean balanceRSGroup(String groupName) throws IOException { + BalanceRSGroupRequest request = BalanceRSGroupRequest.newBuilder() + .setRSGroupName(groupName).build(); + try { + return stub.balanceRSGroup(null, request).getBalanceRan(); + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public List listRSGroups() throws IOException { + try { + List resp = stub.listRSGroupInfos(null, + ListRSGroupInfosRequest.getDefaultInstance()).getRSGroupInfoList(); + List result = new ArrayList<>(resp.size()); + for(RSGroupProtos.RSGroupInfo entry : resp) { + result.add(RSGroupProtobufUtil.toGroupInfo(entry)); + } + return result; + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public RSGroupInfo getRSGroupOfServer(Address hostPort) throws IOException { + GetRSGroupInfoOfServerRequest request = GetRSGroupInfoOfServerRequest.newBuilder() + .setServer(HBaseProtos.ServerName.newBuilder() + .setHostName(hostPort.getHostname()) + .setPort(hostPort.getPort()) + .build()) + .build(); + try { + GetRSGroupInfoOfServerResponse resp = stub.getRSGroupInfoOfServer(null, request); + if (resp.hasRSGroupInfo()) { + return RSGroupProtobufUtil.toGroupInfo(resp.getRSGroupInfo()); + } + return null; + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public void moveServersAndTables(Set
<Address>
servers, Set tables, String targetGroup) + throws IOException { + MoveServersAndTablesRequest.Builder builder = + MoveServersAndTablesRequest.newBuilder().setTargetGroup(targetGroup); + for(Address el: servers) { + builder.addServers(HBaseProtos.ServerName.newBuilder() + .setHostName(el.getHostname()) + .setPort(el.getPort()) + .build()); + } + for(TableName tableName: tables) { + builder.addTableName(ProtobufUtil.toProtoTableName(tableName)); + } + try { + stub.moveServersAndTables(null, builder.build()); + } catch (ServiceException e) { + throw ProtobufUtil.handleRemoteException(e); + } + } + + @Override + public void close() throws IOException { + } +} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java new file mode 100644 index 00000000000..c4f59522317 --- /dev/null +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java @@ -0,0 +1,1049 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.rsgroup; + +import com.google.common.collect.Sets; + +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; + +import java.io.IOException; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ProcedureInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Admin.MasterSwitchType; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.coprocessor.CoprocessorService; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.MasterObserver; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.ResponseConverter; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.BalanceRSGroupResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfServerResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoOfTableResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.GetRSGroupInfoResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest; +import 
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesResponse; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse; +import org.apache.hadoop.hbase.protobuf.generated.TableProtos; + +public class RSGroupAdminEndpoint extends RSGroupAdminService + implements CoprocessorService, Coprocessor, MasterObserver { + + private MasterServices master = null; + + private static RSGroupInfoManagerImpl groupInfoManager; + private RSGroupAdminServer groupAdminServer; + + @Override + public void start(CoprocessorEnvironment env) throws IOException { + MasterCoprocessorEnvironment menv = (MasterCoprocessorEnvironment)env; + master = menv.getMasterServices(); + setGroupInfoManager(new RSGroupInfoManagerImpl(master)); + groupAdminServer = new RSGroupAdminServer(master, groupInfoManager); + Class clazz = + master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null); + if (!RSGroupableBalancer.class.isAssignableFrom(clazz)) { + throw new IOException("Configured balancer is not a GroupableBalancer"); + } + } + + @Override + public void stop(CoprocessorEnvironment env) throws IOException { + } + + @Override + public Service getService() { + return this; + } + + private static void setStaticGroupInfoManager(RSGroupInfoManagerImpl groupInfoManager) { + RSGroupAdminEndpoint.groupInfoManager = groupInfoManager; + } + + private void setGroupInfoManager(RSGroupInfoManagerImpl groupInfoManager) throws IOException { + if (groupInfoManager == null) { + groupInfoManager = new RSGroupInfoManagerImpl(master); + groupInfoManager.init(); + } else if (!groupInfoManager.isInit()) { + groupInfoManager.init(); + } + setStaticGroupInfoManager(groupInfoManager); + } + + public RSGroupInfoManager getGroupInfoManager() { + return groupInfoManager; + } + + @Override + public void getRSGroupInfo(RpcController controller, + GetRSGroupInfoRequest request, + RpcCallback done) { + GetRSGroupInfoResponse response = null; + try { + GetRSGroupInfoResponse.Builder builder = + GetRSGroupInfoResponse.newBuilder(); + RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupInfo(request.getRSGroupName()); + if(RSGroupInfo != null) { + builder.setRSGroupInfo(RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo)); + } + response = builder.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void getRSGroupInfoOfTable(RpcController controller, + GetRSGroupInfoOfTableRequest request, + RpcCallback done) { + GetRSGroupInfoOfTableResponse response = null; + try { + GetRSGroupInfoOfTableResponse.Builder builder = + GetRSGroupInfoOfTableResponse.newBuilder(); + TableName tableName = ProtobufUtil.toTableName(request.getTableName()); + RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupInfoOfTable(tableName); + if (RSGroupInfo == null) { + response = builder.build(); + } else { + response = builder.setRSGroupInfo(RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo)).build(); + } + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void moveServers(RpcController controller, + MoveServersRequest request, + RpcCallback done) { + RSGroupAdminProtos.MoveServersResponse response = null; + try { + 
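// Unpack the wire-format ServerName protos into Address (hostname + port) pairs and delegate the move to RSGroupAdminServer.
+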
RSGroupAdminProtos.MoveServersResponse.Builder builder =
+          RSGroupAdminProtos.MoveServersResponse.newBuilder();
+      Set<Address>
servers = Sets.newHashSet(); + for(HBaseProtos.ServerName el: request.getServersList()) { + servers.add(Address.fromParts(el.getHostName(), el.getPort())); + } + groupAdminServer.moveServers(servers, request.getTargetGroup()); + response = builder.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void moveTables(RpcController controller, + MoveTablesRequest request, + RpcCallback done) { + MoveTablesResponse response = null; + try { + MoveTablesResponse.Builder builder = + MoveTablesResponse.newBuilder(); + Set tables = new HashSet(request.getTableNameList().size()); + for(TableProtos.TableName tableName: request.getTableNameList()) { + tables.add(ProtobufUtil.toTableName(tableName)); + } + groupAdminServer.moveTables(tables, request.getTargetGroup()); + response = builder.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void moveServersAndTables(RpcController controller, MoveServersAndTablesRequest request, + RpcCallback done) { + MoveServersAndTablesResponse.Builder builder = MoveServersAndTablesResponse.newBuilder(); + try { + Set
<Address>
servers = Sets.newHashSet(); + for (HBaseProtos.ServerName el : request.getServersList()) { + servers.add(Address.fromParts(el.getHostName(), el.getPort())); + } + Set tables = new HashSet<>(request.getTableNameList().size()); + for (TableProtos.TableName tableName : request.getTableNameList()) { + tables.add(ProtobufUtil.toTableName(tableName)); + } + groupAdminServer.moveServersAndTables(servers, tables, request.getTargetGroup()); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(builder.build()); + } + + @Override + public void addRSGroup(RpcController controller, + AddRSGroupRequest request, + RpcCallback done) { + AddRSGroupResponse response = null; + try { + AddRSGroupResponse.Builder builder = + AddRSGroupResponse.newBuilder(); + groupAdminServer.addRSGroup(request.getRSGroupName()); + response = builder.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void removeRSGroup(RpcController controller, + RemoveRSGroupRequest request, + RpcCallback done) { + RemoveRSGroupResponse response = null; + try { + RemoveRSGroupResponse.Builder builder = + RemoveRSGroupResponse.newBuilder(); + groupAdminServer.removeRSGroup(request.getRSGroupName()); + response = builder.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void balanceRSGroup(RpcController controller, + BalanceRSGroupRequest request, + RpcCallback done) { + BalanceRSGroupResponse.Builder builder = BalanceRSGroupResponse.newBuilder(); + try { + builder.setBalanceRan(groupAdminServer.balanceRSGroup(request.getRSGroupName())); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + builder.setBalanceRan(false); + } + done.run(builder.build()); + } + + @Override + public void listRSGroupInfos(RpcController controller, + ListRSGroupInfosRequest request, + RpcCallback done) { + ListRSGroupInfosResponse response = null; + try { + ListRSGroupInfosResponse.Builder builder = + ListRSGroupInfosResponse.newBuilder(); + for(RSGroupInfo RSGroupInfo : groupAdminServer.listRSGroups()) { + builder.addRSGroupInfo(RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo)); + } + response = builder.build(); + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(response); + } + + @Override + public void getRSGroupInfoOfServer(RpcController controller, + GetRSGroupInfoOfServerRequest request, + RpcCallback done) { + GetRSGroupInfoOfServerResponse.Builder builder = GetRSGroupInfoOfServerResponse.newBuilder(); + try { + Address server = + Address.fromParts(request.getServer().getHostName(), request.getServer().getPort()); + RSGroupInfo RSGroupInfo = groupAdminServer.getRSGroupOfServer(server); + if (RSGroupInfo != null) { + builder.setRSGroupInfo(RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo)); + } + } catch (IOException e) { + ResponseConverter.setControllerException(controller, e); + } + done.run(builder.build()); + } + + void assignTableToGroup(HTableDescriptor desc) throws IOException { + String groupName = + master.getNamespaceDescriptor(desc.getTableName().getNamespaceAsString()) + .getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP); + if (groupName == null) { + groupName = RSGroupInfo.DEFAULT_GROUP; + } + RSGroupInfo rsGroupInfo = groupAdminServer.getRSGroupInfo(groupName); + if (rsGroupInfo == null) { + throw new 
ConstraintException("Default RSGroup (" + groupName + ") for this table's " + + "namespace does not exist."); + } + if (!rsGroupInfo.containsTable(desc.getTableName())) { + groupAdminServer.moveTables(Sets.newHashSet(desc.getTableName()), groupName); + } + } + + ///////////////////////////////////////////////////////////////////////////// + // MasterObserver overrides + ///////////////////////////////////////////////////////////////////////////// + + // Assign table to default RSGroup. + @Override + public void preCreateTable(ObserverContext ctx, + HTableDescriptor desc, HRegionInfo[] regions) throws IOException { + groupAdminServer.prepareRSGroupForTable(desc); + assignTableToGroup(desc); + } + + // Remove table from its RSGroup. + @Override + public void postDeleteTable(ObserverContext ctx, + TableName tableName) throws IOException { + groupAdminServer.cleanupRSGroupForTable(tableName); + } + + //unused cp hooks + + @Override + public void postCreateTable(ObserverContext ctx, + HTableDescriptor desc, + HRegionInfo[] regions) throws IOException { + + } + + @Override + public void preCreateTableHandler(ObserverContext ctx, + HTableDescriptor desc, + HRegionInfo[] regions) throws IOException { + + } + + @Override + public void postCreateTableHandler(ObserverContext ctx, + HTableDescriptor desc, + HRegionInfo[] regions) throws IOException { + + } + + @Override + public void preDeleteTable(ObserverContext ctx, + TableName tableName) throws IOException { + + } + + @Override + public void preDeleteTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + + } + + @Override + public void postDeleteTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + + } + + @Override + public void preTruncateTable(ObserverContext ctx, + TableName tableName) throws IOException { + + } + + @Override + public void postTruncateTable(ObserverContext ctx, + TableName tableName) throws IOException { + + } + + @Override + public void preTruncateTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + + } + + @Override + public void postTruncateTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + + } + + @Override + public void preModifyTable(ObserverContext ctx, TableName tableName, + HTableDescriptor htd) throws IOException { + + } + + @Override + public void postModifyTable(ObserverContext ctx, + TableName tableName, HTableDescriptor htd) throws IOException { + + } + + @Override + public void preModifyTableHandler(ObserverContext ctx, + TableName tableName, HTableDescriptor htd) throws IOException { + + } + + @Override + public void postModifyTableHandler(ObserverContext ctx, + TableName tableName, HTableDescriptor htd) throws IOException { + + } + + @Override + public void preAddColumn(ObserverContext ctx, + TableName tableName, HColumnDescriptor column) throws IOException { + + } + + @Override + public void postAddColumn(ObserverContext ctx, TableName tableName, + HColumnDescriptor column) throws IOException { + + } + + @Override + public void preAddColumnHandler(ObserverContext ctx, + TableName tableName, HColumnDescriptor column) + throws IOException { + + } + + @Override + public void postAddColumnHandler(ObserverContext ctx, + TableName tableName, HColumnDescriptor column) + throws IOException { + + } + + @Override + public void preModifyColumn(ObserverContext ctx, + TableName tableName, HColumnDescriptor descriptor) + throws IOException { + + } + + @Override + public void postModifyColumn(ObserverContext ctx, + 
TableName tableName, HColumnDescriptor descriptor) + throws IOException { + + } + + @Override + public void preModifyColumnHandler(ObserverContext ctx, + TableName tableName, HColumnDescriptor descriptor) + throws IOException { + + } + + @Override + public void postModifyColumnHandler(ObserverContext ctx, + TableName tableName, HColumnDescriptor descriptor) + throws IOException { + + } + + @Override + public void preDeleteColumn(ObserverContext ctx, TableName + tableName, byte[] c) throws IOException { + + } + + @Override + public void postDeleteColumn(ObserverContext ctx, TableName + tableName, byte[] c) throws IOException { + + } + + @Override + public void preDeleteColumnHandler(ObserverContext ctx, TableName + tableName, byte[] c) throws IOException { + + } + + @Override + public void postDeleteColumnHandler(ObserverContext ctx, + TableName tableName, byte[] c) throws IOException { + + } + + @Override + public void preEnableTable(ObserverContext ctx, + TableName tableName) throws IOException { + + } + + @Override + public void postEnableTable(ObserverContext ctx, + TableName tableName) throws IOException { + + } + + @Override + public void preEnableTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + + } + + @Override + public void postEnableTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + + } + + @Override + public void preDisableTable(ObserverContext ctx, + TableName tableName) throws IOException { + + } + + @Override + public void postDisableTable(ObserverContext ctx, + TableName tableName) throws IOException { + + } + + @Override + public void preDisableTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + + } + + @Override + public void postDisableTableHandler(ObserverContext ctx, + TableName tableName) throws IOException { + + } + + @Override + public void preMove(ObserverContext ctx, HRegionInfo region, + ServerName srcServer, ServerName destServer) throws IOException { + + } + + @Override + public void postMove(ObserverContext ctx, HRegionInfo region, + ServerName srcServer, ServerName destServer) throws IOException { + + } + + @Override + public void preAbortProcedure(ObserverContext ctx, + ProcedureExecutor procEnv, + long procId) throws IOException { + + } + + @Override + public void postAbortProcedure(ObserverContext ctx) + throws IOException { + + } + + @Override + public void preListProcedures(ObserverContext ctx) throws + IOException { + + } + + @Override + public void postListProcedures(ObserverContext ctx, + List procInfoList) throws IOException { + + } + + @Override + public void preAssign(ObserverContext ctx, + HRegionInfo regionInfo) throws IOException { + + } + + @Override + public void postAssign(ObserverContext ctx, HRegionInfo + regionInfo) throws IOException { + + } + + @Override + public void preUnassign(ObserverContext ctx, HRegionInfo + regionInfo, boolean force) throws IOException { + + } + + @Override + public void postUnassign(ObserverContext ctx, + HRegionInfo regionInfo, boolean force) throws IOException { + + } + + @Override + public void preRegionOffline(ObserverContext ctx, + HRegionInfo regionInfo) throws IOException { + + } + + @Override + public void postRegionOffline(ObserverContext ctx, + HRegionInfo regionInfo) throws IOException { + + } + + @Override + public void preBalance(ObserverContext ctx) throws IOException { + + } + + @Override + public void postBalance(ObserverContext ctx, + List plans) throws IOException { + + } + + @Override + public boolean 
preSetSplitOrMergeEnabled(ObserverContext ctx, + boolean newValue, MasterSwitchType switchType) throws + IOException { + return false; + } + + @Override + public void postSetSplitOrMergeEnabled(ObserverContext ctx, + boolean newValue, MasterSwitchType switchType) throws + IOException { + + } + + @Override + public boolean preBalanceSwitch(ObserverContext ctx, + boolean newValue) throws IOException { + return false; + } + + @Override + public void postBalanceSwitch(ObserverContext ctx, boolean + oldValue, boolean newValue) throws IOException { + + } + + @Override + public void preShutdown(ObserverContext ctx) throws IOException { + + } + + @Override + public void preStopMaster(ObserverContext ctx) throws IOException { + + } + + @Override + public void postStartMaster(ObserverContext ctx) + throws IOException { + + } + + @Override + public void preMasterInitialization(ObserverContext ctx) + throws IOException { + + } + + @Override + public void preSnapshot(ObserverContext ctx, + SnapshotDescription snapshot, + HTableDescriptor hTableDescriptor) throws IOException { + + + } + + @Override + public void postSnapshot(ObserverContext ctx, SnapshotDescription + snapshot, HTableDescriptor hTableDescriptor) throws IOException { + + } + + @Override + public void preListSnapshot(ObserverContext ctx, + SnapshotDescription snapshot) throws IOException { + + } + + @Override + public void postListSnapshot(ObserverContext ctx, + SnapshotDescription snapshot) throws IOException { + + } + + @Override + public void preCloneSnapshot(ObserverContext ctx, + SnapshotDescription snapshot, + HTableDescriptor hTableDescriptor) throws IOException { + assignTableToGroup(hTableDescriptor); + } + + @Override + public void postCloneSnapshot(ObserverContext ctx, + SnapshotDescription snapshot, + HTableDescriptor hTableDescriptor) throws IOException { + + } + + @Override + public void preRestoreSnapshot(ObserverContext ctx, + SnapshotDescription snapshot, + HTableDescriptor hTableDescriptor) throws IOException { + + } + + @Override + public void postRestoreSnapshot(ObserverContext ctx, + SnapshotDescription snapshot, + HTableDescriptor hTableDescriptor) throws IOException { + + } + + @Override + public void preDeleteSnapshot(ObserverContext ctx, + SnapshotDescription snapshot) throws IOException { + + } + + @Override + public void postDeleteSnapshot(ObserverContext ctx, + SnapshotDescription snapshot) throws IOException { + + } + + @Override + public void preGetTableDescriptors(ObserverContext ctx, + List tableNamesList, + List descriptors) throws IOException { + + } + + @Override + public void postGetTableDescriptors(ObserverContext ctx, + List descriptors) throws IOException { + + } + + @Override + public void preGetTableDescriptors(ObserverContext ctx, + List tableNamesList, + List descriptors, + String regex) throws IOException { + + } + + @Override + public void postGetTableDescriptors(ObserverContext ctx, + List tableNamesList, + List descriptors, + String regex) throws IOException { + + } + + @Override + public void preGetTableNames(ObserverContext ctx, + List descriptors, + String regex) throws IOException { + + } + + @Override + public void postGetTableNames(ObserverContext ctx, + List descriptors, + String regex) throws IOException { + + } + + @Override + public void preCreateNamespace(ObserverContext ctx, + NamespaceDescriptor ns) throws IOException { + String group = ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP); + if(group != null && groupAdminServer.getRSGroupInfo(group) == null) { + throw 
new ConstraintException("Region server group "+group+" does not exist"); + } + } + + @Override + public void postCreateNamespace(ObserverContext ctx, + NamespaceDescriptor ns) throws IOException { + + } + + @Override + public void preDeleteNamespace(ObserverContext ctx, + String namespace) throws IOException { + + } + + @Override + public void postDeleteNamespace(ObserverContext ctx, + String namespace) throws IOException { + + } + + @Override + public void preModifyNamespace(ObserverContext ctx, + NamespaceDescriptor ns) throws IOException { + preCreateNamespace(ctx, ns); + } + + @Override + public void postModifyNamespace(ObserverContext ctx, + NamespaceDescriptor ns) throws IOException { + + } + + @Override + public void preGetNamespaceDescriptor(ObserverContext ctx, + String namespace) throws IOException { + + } + + @Override + public void postGetNamespaceDescriptor(ObserverContext ctx, + NamespaceDescriptor ns) throws IOException { + + } + + @Override + public void preListNamespaceDescriptors(ObserverContext ctx, + List descriptors) + throws IOException { + + } + + @Override + public void postListNamespaceDescriptors(ObserverContext ctx, + List descriptors) + throws IOException { + + } + + @Override + public void preTableFlush(ObserverContext ctx, TableName + tableName) throws IOException { + + } + + @Override + public void postTableFlush(ObserverContext ctx, + TableName tableName) throws IOException { + + } + + @Override + public void preSetUserQuota(ObserverContext ctx, String userName, + Quotas quotas) throws IOException { + + } + + @Override + public void postSetUserQuota(ObserverContext ctx, + String userName, Quotas quotas) throws IOException { + + } + + @Override + public void preSetUserQuota(ObserverContext ctx, String userName, + TableName tableName, Quotas quotas) throws IOException { + + } + + @Override + public void postSetUserQuota(ObserverContext ctx, String + userName, TableName tableName, Quotas quotas) throws IOException { + + } + + @Override + public void preSetUserQuota(ObserverContext ctx, String userName, + String namespace, Quotas quotas) throws IOException { + + } + + @Override + public void postSetUserQuota(ObserverContext ctx, String userName, + String namespace, Quotas quotas) throws IOException { + + } + + @Override + public void preSetTableQuota(ObserverContext ctx, + TableName tableName, Quotas quotas) throws IOException { + + } + + @Override + public void postSetTableQuota(ObserverContext ctx, + TableName tableName, Quotas quotas) throws IOException { + + } + + @Override + public void preSetNamespaceQuota(ObserverContext ctx, + String namespace, Quotas quotas) throws IOException { + + } + + @Override + public void postSetNamespaceQuota(ObserverContext ctx, + String namespace, Quotas quotas) throws IOException { + + } + + @Override + public void preDispatchMerge(ObserverContext ctx, + HRegionInfo regionA, HRegionInfo regionB) throws IOException { + + } + + @Override + public void postDispatchMerge(ObserverContext c, + HRegionInfo regionA, HRegionInfo regionB) throws IOException { + + } + + @Override + public void preListDeadServers(ObserverContext ctx) + throws IOException { + + } + + @Override + public void postListDeadServers(ObserverContext ctx) + throws IOException { + + } + + @Override + public void preClearDeadServers(ObserverContext ctx) + throws IOException { + + } + + @Override + public void postClearDeadServers(ObserverContext ctx) + throws IOException { + + } + + @Override + public void preMoveServers(ObserverContext ctx, + Set
servers, String targetGroup) throws IOException { + + } + + @Override + public void postMoveServers(ObserverContext ctx, + Set
servers, String targetGroup) throws IOException { + + } + + @Override + public void preMoveTables(ObserverContext ctx, Set + tables, String targetGroup) throws IOException { + + } + + @Override + public void postMoveTables(ObserverContext ctx, + Set tables, String targetGroup) throws IOException { + + } + + @Override + public void preMoveServersAndTables(ObserverContext ctx, + Set
servers, Set tables, String targetGroup) throws IOException { + } + + @Override + public void postMoveServersAndTables(ObserverContext ctx, + Set
servers, Set tables, String targetGroup) throws IOException { + } + + @Override + public void preAddRSGroup(ObserverContext ctx, + String name) throws IOException { + + } + + @Override + public void postAddRSGroup(ObserverContext ctx, + String name) throws IOException { + + } + + @Override + public void preRemoveRSGroup(ObserverContext ctx, + String name) throws IOException { + + } + + @Override + public void postRemoveRSGroup(ObserverContext ctx, + String name) throws IOException { + + } + + @Override + public void preBalanceRSGroup(ObserverContext ctx, + String groupName) throws IOException { + + } + + @Override + public void postBalanceRSGroup(ObserverContext ctx, + String groupName, boolean balancerRan) throws IOException { + + } + +} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java new file mode 100644 index 00000000000..13b3141b803 --- /dev/null +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java @@ -0,0 +1,516 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rsgroup; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.LoadBalancer; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.master.TableLockManager.TableLock; +import org.apache.hadoop.hbase.net.Address; + +/** + * Service to support Region Server Grouping (HBase-6721) + */ +@InterfaceAudience.Private +public class RSGroupAdminServer implements RSGroupAdmin { + private static final Log LOG = LogFactory.getLog(RSGroupAdminServer.class); + + private MasterServices master; + //List of servers that are being moved from one group to another + //Key=host:port,Value=targetGroup + private ConcurrentMap serversInTransition = + new ConcurrentHashMap(); + private RSGroupInfoManager RSGroupInfoManager; + + public RSGroupAdminServer(MasterServices master, + RSGroupInfoManager RSGroupInfoManager) throws IOException { + this.master = master; + this.RSGroupInfoManager = RSGroupInfoManager; + } + + @Override + public RSGroupInfo getRSGroupInfo(String groupName) throws IOException{ + return getRSGroupInfoManager().getRSGroup(groupName); + } + + + @Override + public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException { + String groupName = getRSGroupInfoManager().getRSGroupOfTable(tableName); + if (groupName == null) { + return null; + } + return getRSGroupInfoManager().getRSGroup(groupName); + } + + @Override + public void moveServers(Set
servers, String targetGroupName) + throws IOException { + if (servers == null) { + throw new ConstraintException( + "The list of servers cannot be null."); + } + if (StringUtils.isEmpty(targetGroupName)) { + throw new ConstraintException("The target group cannot be null."); + } + if (servers.size() < 1) { + return; + } + + RSGroupInfo targetGrp = getRSGroupInfo(targetGroupName); + if (targetGrp == null) { + throw new ConstraintException("Group does not exist: "+targetGroupName); + } + + RSGroupInfoManager manager = getRSGroupInfoManager(); + synchronized (manager) { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preMoveServers(servers, targetGroupName); + } + Address firstServer = servers.iterator().next(); + //we only allow a move from a single source group + //so this should be ok + RSGroupInfo srcGrp = manager.getRSGroupOfServer(firstServer); + //only move online servers (from default) + //or servers from other groups + //this prevents bogus servers from entering groups + if (srcGrp == null) { + throw new ConstraintException( + "Server "+firstServer+" does not have a group."); + } + if (RSGroupInfo.DEFAULT_GROUP.equals(srcGrp.getName())) { + Set
onlineServers = new HashSet
(); + for(ServerName server: master.getServerManager().getOnlineServers().keySet()) { + onlineServers.add(server.getAddress()); + } + for(Address el: servers) { + if(!onlineServers.contains(el)) { + throw new ConstraintException( + "Server "+el+" is not an online server in default group."); + } + } + } + + if(srcGrp.getServers().size() <= servers.size() && + srcGrp.getTables().size() > 0) { + throw new ConstraintException("Cannot leave a group "+srcGrp.getName()+ + " that contains tables " +"without servers."); + } + + String sourceGroupName = getRSGroupInfoManager() + .getRSGroupOfServer(srcGrp.getServers().iterator().next()).getName(); + if(getRSGroupInfo(targetGroupName) == null) { + throw new ConstraintException("Target group does not exist: "+targetGroupName); + } + + for(Address server: servers) { + if (serversInTransition.containsKey(server)) { + throw new ConstraintException( + "Server list contains a server that is already being moved: "+server); + } + String tmpGroup = getRSGroupInfoManager().getRSGroupOfServer(server).getName(); + if (sourceGroupName != null && !tmpGroup.equals(sourceGroupName)) { + throw new ConstraintException( + "Move server request should only come from one source group. "+ + "Expecting only "+sourceGroupName+" but contains "+tmpGroup); + } + } + + if(sourceGroupName.equals(targetGroupName)) { + throw new ConstraintException( + "Target group is the same as source group: "+targetGroupName); + } + + try { + //update the servers as in transition + for (Address server : servers) { + serversInTransition.put(server, targetGroupName); + } + + getRSGroupInfoManager().moveServers(servers, sourceGroupName, targetGroupName); + boolean found; + List
tmpServers = Lists.newArrayList(servers); + do { + found = false; + for (Iterator
iter = tmpServers.iterator(); + iter.hasNext(); ) { + Address rs = iter.next(); + //get online regions + List regions = new LinkedList(); + for (Map.Entry el : + master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) { + if (el.getValue().getAddress().equals(rs)) { + regions.add(el.getKey()); + } + } + Iterator i = + master.getAssignmentManager().getRegionStates().getRegionsInTransition().iterator(); + while (i.hasNext()) { + RegionState state = i.next(); + if (state.getServerName().getAddress().equals(rs)) { + regions.add(state.getRegion()); + } + } + + //unassign regions for a server + LOG.info("Unassigning " + regions.size() + + " regions from server " + rs + " for move to " + targetGroupName); + if (regions.size() > 0) { + //TODO bulk unassign or throttled unassign? + for (HRegionInfo region : regions) { + //regions might belong to tables of the target group, + //so we need to filter + if (!targetGrp.containsTable(region.getTable())) { + master.getAssignmentManager().unassign(region); + found = true; + } + } + } + if (!found) { + iter.remove(); + } + } + try { + manager.wait(1000); + } catch (InterruptedException e) { + LOG.warn("Sleep interrupted", e); + Thread.currentThread().interrupt(); + } + } while (found); + } finally { + //remove from transition + for (Address server : servers) { + serversInTransition.remove(server); + } + } + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postMoveServers(servers, targetGroupName); + } + LOG.info("Move server done: "+sourceGroupName+"->"+targetGroupName); + } + } + + @Override + public void moveTables(Set tables, String targetGroup) throws IOException { + if (tables == null) { + throw new ConstraintException( + "The list of tables cannot be null."); + } + if(tables.size() < 1) { + LOG.debug("moveTables() passed an empty set.
Ignoring."); + return; + } + RSGroupInfoManager manager = getRSGroupInfoManager(); + synchronized (manager) { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preMoveTables(tables, targetGroup); + } + + if(targetGroup != null) { + RSGroupInfo destGroup = manager.getRSGroup(targetGroup); + if(destGroup == null) { + throw new ConstraintException("Target group does not exist: "+targetGroup); + } + if(destGroup.getServers().size() < 1) { + throw new ConstraintException("Target group must have at least one server."); + } + } + + for(TableName table : tables) { + String srcGroup = manager.getRSGroupOfTable(table); + if(srcGroup != null && srcGroup.equals(targetGroup)) { + throw new ConstraintException( + "Source group is the same as target group for table "+table+" :"+srcGroup); + } + } + manager.moveTables(tables, targetGroup); + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postMoveTables(tables, targetGroup); + } + } + for(TableName table: tables) { + TableLock lock = master.getTableLockManager().writeLock(table, "Group: table move"); + try { + lock.acquire(); + for (HRegionInfo region : + master.getAssignmentManager().getRegionStates().getRegionsOfTable(table)) { + master.getAssignmentManager().unassign(region); + } + } finally { + lock.release(); + } + } + } + + @Override + public void addRSGroup(String name) throws IOException { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preAddRSGroup(name); + } + getRSGroupInfoManager().addRSGroup(new RSGroupInfo(name)); + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postAddRSGroup(name); + } + } + + @Override + public void removeRSGroup(String name) throws IOException { + RSGroupInfoManager manager = getRSGroupInfoManager(); + synchronized (manager) { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preRemoveRSGroup(name); + } + RSGroupInfo RSGroupInfo = getRSGroupInfoManager().getRSGroup(name); + if(RSGroupInfo == null) { + throw new ConstraintException("Group "+name+" does not exist"); + } + int tableCount = RSGroupInfo.getTables().size(); + if (tableCount > 0) { + throw new ConstraintException("Group "+name+" must have no associated tables: "+tableCount); + } + int serverCount = RSGroupInfo.getServers().size(); + if(serverCount > 0) { + throw new ConstraintException( + "Group "+name+" must have no associated servers: "+serverCount); + } + for(NamespaceDescriptor ns: master.getTableNamespaceManager().list()) { + String nsGroup = ns.getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP); + if(nsGroup != null && nsGroup.equals(name)) { + throw new ConstraintException("Group "+name+" is referenced by namespace: "+ns.getName()); + } + } + manager.removeRSGroup(name); + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postRemoveRSGroup(name); + } + } + } + + @Override + public boolean balanceRSGroup(String groupName) throws IOException { + ServerManager serverManager = master.getServerManager(); + AssignmentManager assignmentManager = master.getAssignmentManager(); + LoadBalancer balancer = master.getLoadBalancer(); + + boolean balancerRan; + synchronized (balancer) { + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().preBalanceRSGroup(groupName); + } + if (getRSGroupInfo(groupName) == null) { + throw new ConstraintException("Group does not exist: "+groupName); + } + // Only allow one 
balance run at a time. + Map groupRIT = rsGroupGetRegionsInTransition(groupName); + if (groupRIT.size() > 0) { + LOG.debug("Not running balancer because " + + groupRIT.size() + + " region(s) in transition: " + + StringUtils.abbreviate( + master.getAssignmentManager().getRegionStates().getRegionsInTransition().toString(), + 256)); + return false; + } + if (serverManager.areDeadServersInProgress()) { + LOG.debug("Not running balancer because processing dead regionserver(s): " + + serverManager.getDeadServers()); + return false; + } + + //We balance per group instead of per table + List plans = new ArrayList(); + for(Map.Entry>> tableMap: + getRSGroupAssignmentsByTable(groupName).entrySet()) { + LOG.info("Creating partial plan for table "+tableMap.getKey()+": "+tableMap.getValue()); + List partialPlans = balancer.balanceCluster(tableMap.getValue()); + LOG.info("Partial plan for table "+tableMap.getKey()+": "+partialPlans); + if (partialPlans != null) { + plans.addAll(partialPlans); + } + } + long startTime = System.currentTimeMillis(); + balancerRan = plans != null; + if (plans != null && !plans.isEmpty()) { + LOG.info("Group balance "+groupName+" starting with plan count: "+plans.size()); + for (RegionPlan plan: plans) { + LOG.info("balance " + plan); + assignmentManager.balance(plan); + } + LOG.info("Group balance "+groupName+" completed after "+ + (System.currentTimeMillis()-startTime)+" ms"); + } + if (master.getMasterCoprocessorHost() != null) { + master.getMasterCoprocessorHost().postBalanceRSGroup(groupName, balancerRan); + } + } + return balancerRan; + } + + @Override + public List listRSGroups() throws IOException { + return getRSGroupInfoManager().listRSGroups(); + } + + @Override + public RSGroupInfo getRSGroupOfServer(Address server) throws IOException { + return getRSGroupInfoManager().getRSGroupOfServer(server); + } + + @InterfaceAudience.Private + public RSGroupInfoManager getRSGroupInfoManager() throws IOException { + return RSGroupInfoManager; + } + + private Map rsGroupGetRegionsInTransition(String groupName) + throws IOException { + Map rit = Maps.newTreeMap(); + AssignmentManager am = master.getAssignmentManager(); + RSGroupInfo RSGroupInfo = getRSGroupInfo(groupName); + for(TableName tableName : RSGroupInfo.getTables()) { + for(HRegionInfo regionInfo: am.getRegionStates().getRegionsOfTable(tableName)) { + RegionState state = + master.getAssignmentManager().getRegionStates().getRegionTransitionState(regionInfo); + if(state != null) { + rit.put(regionInfo.getEncodedName(), state); + } + } + } + return rit; + } + + private Map>> + getRSGroupAssignmentsByTable(String groupName) throws IOException { + Map>> result = Maps.newHashMap(); + RSGroupInfo RSGroupInfo = getRSGroupInfo(groupName); + Map>> assignments = Maps.newHashMap(); + for(Map.Entry entry: + master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) { + TableName currTable = entry.getKey().getTable(); + ServerName currServer = entry.getValue(); + HRegionInfo currRegion = entry.getKey(); + if(RSGroupInfo.getTables().contains(currTable)) { + if(!assignments.containsKey(entry.getKey().getTable())) { + assignments.put(currTable, new HashMap>()); + } + if(!assignments.get(currTable).containsKey(currServer)) { + assignments.get(currTable).put(currServer, new ArrayList()); + } + assignments.get(currTable).get(currServer).add(currRegion); + } + } + + Map> serverMap = Maps.newHashMap(); + for(ServerName serverName: master.getServerManager().getOnlineServers().keySet()) { +
if(RSGroupInfo.getServers().contains(serverName.getAddress())) { + serverMap.put(serverName, Collections. emptyList()); + } + } + + //add all tables that are members of the group + for(TableName tableName : RSGroupInfo.getTables()) { + if(assignments.containsKey(tableName)) { + result.put(tableName, new HashMap>()); + result.get(tableName).putAll(serverMap); + result.get(tableName).putAll(assignments.get(tableName)); + LOG.debug("Adding assignments for "+tableName+": "+assignments.get(tableName)); + } + } + + return result; + } + + public void prepareRSGroupForTable(HTableDescriptor desc) throws IOException { + String groupName = + master.getTableNamespaceManager().get(desc.getTableName().getNamespaceAsString()) + .getConfigurationValue(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP); + if (groupName == null) { + groupName = RSGroupInfo.DEFAULT_GROUP; + } + RSGroupInfo RSGroupInfo = getRSGroupInfo(groupName); + if (RSGroupInfo == null) { + throw new ConstraintException("RSGroup " + groupName + " does not exist."); + } + if (!RSGroupInfo.containsTable(desc.getTableName())) { + LOG.debug("Pre-moving table " + desc.getTableName() + " to rsgroup " + groupName); + moveTables(Sets.newHashSet(desc.getTableName()), groupName); + } + } + + public void cleanupRSGroupForTable(TableName tableName) throws IOException { + try { + RSGroupInfo group = getRSGroupInfoOfTable(tableName); + if (group != null) { + LOG.debug("Removing deleted table from table rsgroup " + group.getName()); + moveTables(Sets.newHashSet(tableName), null); + } + } catch (ConstraintException ex) { + LOG.debug("Failed to perform rsgroup information cleanup for table: " + tableName, ex); + } catch (IOException ex) { + LOG.debug("Failed to perform rsgroup information cleanup for table: " + tableName, ex); + } + } + + @Override + public void moveServersAndTables(Set
servers, Set tables, + String targetGroup) throws IOException { + if (servers == null || servers.isEmpty() ) { + throw new ConstraintException("The list of servers to move cannot be null or empty."); + } + if (tables == null || tables.isEmpty()) { + throw new ConstraintException("The list of tables to move cannot be null or empty."); + } + moveServers(servers, targetGroup); + moveTables(tables, targetGroup); + } + + @Override + public void close() throws IOException { + } +} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java new file mode 100644 index 00000000000..48bcb64fb45 --- /dev/null +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java @@ -0,0 +1,431 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rsgroup; + +import com.google.common.collect.ArrayListMultimap; +import com.google.common.collect.LinkedListMultimap; +import com.google.common.collect.ListMultimap; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.ClusterStatus; +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.master.LoadBalancer; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer; +import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.util.ReflectionUtils; + +/** + * GroupBasedLoadBalancer, used when Region Server Grouping is configured (HBase-6721) + * It does region balance based on a table's group membership. + * + * Most assignment methods contain two exclusive code paths: Online - when the group + * table is online and Offline - when it is unavailable. + * + * During Offline, assignments are assigned based on cached information in zookeeper. 
+ * If unavailable (ie bootstrap) then regions are assigned randomly. + * + * Once the GROUP table has been assigned, the balancer switches to Online and will then + * start providing appropriate assignments for user tables. + * + */ +@InterfaceAudience.Private +public class RSGroupBasedLoadBalancer implements RSGroupableBalancer, LoadBalancer { + /** Config for pluggable load balancers */ + public static final String HBASE_GROUP_LOADBALANCER_CLASS = "hbase.group.grouploadbalancer.class"; + + private static final Log LOG = LogFactory.getLog(RSGroupBasedLoadBalancer.class); + + private Configuration config; + private ClusterStatus clusterStatus; + private MasterServices masterServices; + private RSGroupInfoManager RSGroupInfoManager; + private LoadBalancer internalBalancer; + + //used during reflection by LoadBalancerFactory + @InterfaceAudience.Private + public RSGroupBasedLoadBalancer() { + } + + //This constructor should only be used for unit testing + @InterfaceAudience.Private + public RSGroupBasedLoadBalancer(RSGroupInfoManager RSGroupInfoManager) { + this.RSGroupInfoManager = RSGroupInfoManager; + } + + @Override + public Configuration getConf() { + return config; + } + + @Override + public void setConf(Configuration conf) { + this.config = conf; + } + + @Override + public void setClusterStatus(ClusterStatus st) { + this.clusterStatus = st; + } + + @Override + public void setMasterServices(MasterServices masterServices) { + this.masterServices = masterServices; + } + + @Override + public List balanceCluster(TableName tableName, Map> + clusterState) throws HBaseIOException { + return balanceCluster(clusterState); + } + + @Override + public List balanceCluster(Map> clusterState) + throws HBaseIOException { + if (!isOnline()) { + throw new ConstraintException(RSGroupInfoManager.RSGROUP_TABLE_NAME + + " is not online, unable to perform balance"); + } + + Map> correctedState = correctAssignments(clusterState); + List regionPlans = new ArrayList(); + + List misplacedRegions = correctedState.get(LoadBalancer.BOGUS_SERVER_NAME); + for (HRegionInfo regionInfo : misplacedRegions) { + regionPlans.add(new RegionPlan(regionInfo, null, null)); + } + try { + for (RSGroupInfo info : RSGroupInfoManager.listRSGroups()) { + Map> groupClusterState = + new HashMap>(); + for (Address addr : info.getServers()) { + for(ServerName curr: clusterState.keySet()) { + if(curr.getAddress().equals(addr)) { + groupClusterState.put(curr, correctedState.get(curr)); + } + } + } + List groupPlans = this.internalBalancer + .balanceCluster(groupClusterState); + if (groupPlans != null) { + regionPlans.addAll(groupPlans); + } + } + } catch (IOException exp) { + LOG.warn("Exception while balancing cluster.", exp); + regionPlans.clear(); + } + return regionPlans; + } + + @Override + public Map> roundRobinAssignment( + List regions, List servers) throws HBaseIOException { + Map> assignments = Maps.newHashMap(); + ListMultimap regionMap = ArrayListMultimap.create(); + ListMultimap serverMap = ArrayListMultimap.create(); + generateGroupMaps(regions, servers, regionMap, serverMap); + for(String groupKey : regionMap.keySet()) { + if (regionMap.get(groupKey).size() > 0) { + Map> result = + this.internalBalancer.roundRobinAssignment( + regionMap.get(groupKey), + serverMap.get(groupKey)); + if(result != null) { + assignments.putAll(result); + } + } + } + return assignments; + } + + @Override + public Map> retainAssignment( + Map regions, List servers) throws HBaseIOException { + try { + Map> assignments = new TreeMap>(); + 
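+      // Two phases: regions already on a server of their own group are retained via the
+      // internal balancer; misplaced regions are randomly assigned within their group below.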
ListMultimap groupToRegion = ArrayListMultimap.create(); + Set misplacedRegions = getMisplacedRegions(regions); + for (HRegionInfo region : regions.keySet()) { + if (!misplacedRegions.contains(region)) { + String groupName = RSGroupInfoManager.getRSGroupOfTable(region.getTable()); + groupToRegion.put(groupName, region); + } + } + // Now the "groupToRegion" map has only the regions which have correct + // assignments. + for (String key : groupToRegion.keySet()) { + Map currentAssignmentMap = new TreeMap(); + List regionList = groupToRegion.get(key); + RSGroupInfo info = RSGroupInfoManager.getRSGroup(key); + List candidateList = filterOfflineServers(info, servers); + for (HRegionInfo region : regionList) { + currentAssignmentMap.put(region, regions.get(region)); + } + if(candidateList.size() > 0) { + assignments.putAll(this.internalBalancer.retainAssignment( + currentAssignmentMap, candidateList)); + } + } + + for (HRegionInfo region : misplacedRegions) { + String groupName = RSGroupInfoManager.getRSGroupOfTable( + region.getTable()); + RSGroupInfo info = RSGroupInfoManager.getRSGroup(groupName); + List candidateList = filterOfflineServers(info, servers); + ServerName server = this.internalBalancer.randomAssignment(region, + candidateList); + if (server != null) { + if (!assignments.containsKey(server)) { + assignments.put(server, new ArrayList()); + } + assignments.get(server).add(region); + } else { + //if no server is available, assign to bogus so it ends up in RIT + if(!assignments.containsKey(LoadBalancer.BOGUS_SERVER_NAME)) { + assignments.put(LoadBalancer.BOGUS_SERVER_NAME, new ArrayList()); + } + assignments.get(LoadBalancer.BOGUS_SERVER_NAME).add(region); + } + } + return assignments; + } catch (IOException e) { + throw new HBaseIOException("Failed to do online retain assignment", e); + } + } + + @Override + public Map immediateAssignment(List regions, + List servers) throws HBaseIOException { + throw new UnsupportedOperationException("immediateAssignment is not supported"); + } + + @Override + public ServerName randomAssignment(HRegionInfo region, + List servers) throws HBaseIOException { + ListMultimap regionMap = LinkedListMultimap.create(); + ListMultimap serverMap = LinkedListMultimap.create(); + generateGroupMaps(Lists.newArrayList(region), servers, regionMap, serverMap); + List filteredServers = serverMap.get(regionMap.keySet().iterator().next()); + return this.internalBalancer.randomAssignment(region, filteredServers); + } + + private void generateGroupMaps( + List regions, + List servers, + ListMultimap regionMap, + ListMultimap serverMap) throws HBaseIOException { + try { + for (HRegionInfo region : regions) { + String groupName = RSGroupInfoManager.getRSGroupOfTable(region.getTable()); + if(groupName == null) { + LOG.warn("Group for table "+region.getTable()+" is null"); + } + regionMap.put(groupName, region); + } + for (String groupKey : regionMap.keySet()) { + RSGroupInfo info = RSGroupInfoManager.getRSGroup(groupKey); + serverMap.putAll(groupKey, filterOfflineServers(info, servers)); + if(serverMap.get(groupKey).size() < 1) { + serverMap.put(groupKey, LoadBalancer.BOGUS_SERVER_NAME); + } + } + } catch(IOException e) { + throw new HBaseIOException("Failed to generate group maps", e); + } + } + + private List filterOfflineServers(RSGroupInfo RSGroupInfo, + List onlineServers) { + if (RSGroupInfo != null) { + return filterServers(RSGroupInfo.getServers(), onlineServers); + } else { + LOG.debug("Group Information found to be null.
Some regions might be unassigned."); + return Collections.emptyList(); + } + } + + /** + * Filter servers based on the online servers. + * + * @param servers + * the servers to filter + * @param onlineServers + * list of servers which are currently online + * @return the members of {@code servers} that are online + */ + private List filterServers(Collection
servers, + Collection onlineServers) { + ArrayList finalList = new ArrayList(); + for (Address server : servers) { + for(ServerName curr: onlineServers) { + if(curr.getAddress().equals(server)) { + finalList.add(curr); + } + } + } + return finalList; + } + + private ListMultimap groupRegions( + List regionList) throws IOException { + ListMultimap regionGroup = ArrayListMultimap + .create(); + for (HRegionInfo region : regionList) { + String groupName = RSGroupInfoManager.getRSGroupOfTable(region.getTable()); + regionGroup.put(groupName, region); + } + return regionGroup; + } + + private Set getMisplacedRegions( + Map regions) throws IOException { + Set misplacedRegions = new HashSet(); + for(Map.Entry region : regions.entrySet()) { + HRegionInfo regionInfo = region.getKey(); + ServerName assignedServer = region.getValue(); + RSGroupInfo info = + RSGroupInfoManager.getRSGroup(RSGroupInfoManager.getRSGroupOfTable(regionInfo.getTable())); + if (assignedServer != null && + (info == null || !info.containsServer(assignedServer.getAddress()))) { + LOG.debug("Found misplaced region: " + regionInfo.getRegionNameAsString() + + " on server: " + assignedServer + + " found in group: " + + RSGroupInfoManager.getRSGroupOfServer(assignedServer.getAddress()) + + " outside of group: " + (info == null ? "UNKNOWN" : info.getName())); + misplacedRegions.add(regionInfo); + } + } + return misplacedRegions; + } + + private Map> correctAssignments( + Map> existingAssignments){ + Map> correctAssignments = + new TreeMap>(); + List misplacedRegions = new LinkedList(); + correctAssignments.put(LoadBalancer.BOGUS_SERVER_NAME, new LinkedList()); + for (Map.Entry> assignments : existingAssignments.entrySet()){ + ServerName sName = assignments.getKey(); + correctAssignments.put(sName, new LinkedList()); + List regions = assignments.getValue(); + for (HRegionInfo region : regions) { + RSGroupInfo info = null; + try { + info = RSGroupInfoManager.getRSGroup( + RSGroupInfoManager.getRSGroupOfTable(region.getTable())); + } catch (IOException exp) { + LOG.debug("Group information null for region of table " + region.getTable(), + exp); + } + if ((info == null) || (!info.containsServer(sName.getAddress()))) { + correctAssignments.get(LoadBalancer.BOGUS_SERVER_NAME).add(region); + } else { + correctAssignments.get(sName).add(region); + } + } + } + + //TODO bulk unassign? + //unassign misplaced regions, so that they are assigned to correct groups. 
+ for(HRegionInfo info: misplacedRegions) { + this.masterServices.getAssignmentManager().unassign(info); + } + return correctAssignments; + } + + @Override + public void initialize() throws HBaseIOException { + try { + if (RSGroupInfoManager == null) { + List cps = + masterServices.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class); + if (cps.size() != 1) { + String msg = "Expected one implementation of GroupAdminEndpoint but found " + cps.size(); + LOG.error(msg); + throw new HBaseIOException(msg); + } + RSGroupInfoManager = cps.get(0).getGroupInfoManager(); + } + } catch (IOException e) { + throw new HBaseIOException("Failed to initialize GroupInfoManagerImpl", e); + } + + // Create the balancer + Class balancerKlass = config.getClass( + HBASE_GROUP_LOADBALANCER_CLASS, + StochasticLoadBalancer.class, LoadBalancer.class); + internalBalancer = ReflectionUtils.newInstance(balancerKlass, config); + internalBalancer.setClusterStatus(clusterStatus); + internalBalancer.setMasterServices(masterServices); + internalBalancer.setConf(config); + internalBalancer.initialize(); + } + + public boolean isOnline() { + return RSGroupInfoManager != null && RSGroupInfoManager.isOnline(); + } + + @Override + public void regionOnline(HRegionInfo regionInfo, ServerName sn) { + } + + @Override + public void regionOffline(HRegionInfo regionInfo) { + } + + @Override + public void onConfigurationChange(Configuration conf) { + //DO nothing for now + } + + @Override + public void stop(String why) { + } + + @Override + public boolean isStopped() { + return false; + } +} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java new file mode 100644 index 00000000000..60afee0c14a --- /dev/null +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfo.java @@ -0,0 +1,190 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rsgroup; + +import com.google.common.collect.Sets; + +import java.util.Collection; +import java.util.NavigableSet; +import java.util.Set; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.net.Address; + +/** + * Stores the group information of region server groups. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class RSGroupInfo { + + public static final String DEFAULT_GROUP = "default"; + public static final String NAMESPACE_DESC_PROP_GROUP = "hbase.rsgroup.name"; + + private String name; + private Set
servers; + private NavigableSet tables; + + public RSGroupInfo(String name) { + this(name, Sets.
newHashSet(), Sets.newTreeSet()); + } + + RSGroupInfo(String name, + Set
servers, + NavigableSet tables) { + this.name = name; + this.servers = servers; + this.tables = tables; + } + + public RSGroupInfo(RSGroupInfo src) { + name = src.getName(); + servers = Sets.newHashSet(src.getServers()); + tables = Sets.newTreeSet(src.getTables()); + } + + /** + * Get group name. + * + * @return group name + */ + public String getName() { + return name; + } + + /** + * Adds the server to the group. + * + * @param server the server + */ + public void addServer(Address server){ + servers.add(server); + } + + /** + * Adds a group of servers. + * + * @param servers the servers + */ + public void addAllServers(Collection
servers){ + this.servers.addAll(servers); + } + + /** + * @param address Address of the server + * @return true, if a server with address is found + */ + public boolean containsServer(Address address) { + return servers.contains(address); + } + + /** + * Get list of servers. + * + * @return set of servers + */ + public Set
getServers() { + return servers; + } + + /** + * Remove a server from this group. + * + * @param address Address of the server to remove + */ + public boolean removeServer(Address address) { + return servers.remove(address); + } + + /** + * Set of tables that are members of this group + * @return set of tables + */ + public NavigableSet getTables() { + return tables; + } + + public void addTable(TableName table) { + tables.add(table); + } + + public void addAllTables(Collection arg) { + tables.addAll(arg); + } + + public boolean containsTable(TableName table) { + return tables.contains(table); + } + + public boolean removeTable(TableName table) { + return tables.remove(table); + } + + @Override + public String toString() { + StringBuffer sb = new StringBuffer(); + sb.append("Name:"); + sb.append(this.name); + sb.append(", "); + sb.append(" Servers:"); + sb.append(this.servers); + sb.append(", "); + sb.append(" Tables:"); + sb.append(this.tables); + return sb.toString(); + + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + RSGroupInfo RSGroupInfo = (RSGroupInfo) o; + + if (!name.equals(RSGroupInfo.name)) { + return false; + } + if (!servers.equals(RSGroupInfo.servers)) { + return false; + } + if (!tables.equals(RSGroupInfo.tables)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = servers.hashCode(); + result = 31 * result + tables.hashCode(); + result = 31 * result + name.hashCode(); + return result; + } + +} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java new file mode 100644 index 00000000000..ab423e95658 --- /dev/null +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java @@ -0,0 +1,116 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rsgroup; + + +import java.io.IOException; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.net.Address; + +/** + * Interface used to manage RSGroupInfo storage. An implementation + * has the option to support offline mode. 
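+ * <p>A minimal interaction sketch; the group, host, and table names are illustrative:
+ * <pre>
+ *   manager.addRSGroup(new RSGroupInfo("app1"));
+ *   manager.moveServers(Sets.newHashSet(Address.fromParts("rs1.example.com", 16020)),
+ *       RSGroupInfo.DEFAULT_GROUP, "app1");
+ *   manager.moveTables(Sets.newHashSet(TableName.valueOf("t1")), "app1");
+ * </pre>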
+ * See {@link RSGroupBasedLoadBalancer} + */ +@InterfaceAudience.Private +public interface RSGroupInfoManager { + //Assigned before user tables + TableName RSGROUP_TABLE_NAME = + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "rsgroup"); + String rsGroupZNode = "rsgroup"; + byte[] META_FAMILY_BYTES = Bytes.toBytes("m"); + byte[] META_QUALIFIER_BYTES = Bytes.toBytes("i"); + byte[] ROW_KEY = {0}; + + /** + * Add given RSGroupInfo to existing list of group infos. + */ + void addRSGroup(RSGroupInfo rsGroupInfo) throws IOException; + + /** + * Remove a region server group. + */ + void removeRSGroup(String groupName) throws IOException; + + /** + * Move servers to a new group. + * @param servers list of servers, must be part of the same group + * @param srcGroup groupName being moved from + * @param dstGroup groupName being moved to + * @return Set of servers moved (May be a subset of {@code servers}). + */ + Set
moveServers(Set
servers, String srcGroup, String dstGroup) + throws IOException; + + /** + * Gets the group info of server. + */ + RSGroupInfo getRSGroupOfServer(Address serverHostPort) throws IOException; + + /** + * Gets {@code RSGroupInfo} for the given group name. + */ + RSGroupInfo getRSGroup(String groupName) throws IOException; + + /** + * Get the group membership of a table + */ + String getRSGroupOfTable(TableName tableName) throws IOException; + + /** + * Set the group membership of a set of tables + * + * @param tableNames set of tables to move + * @param groupName name of group of tables to move to + */ + void moveTables(Set tableNames, String groupName) throws IOException; + + /** + * List the existing {@code RSGroupInfo}s. + */ + List listRSGroups() throws IOException; + + /** + * Refresh/reload the group information from the persistent store + */ + void refresh() throws IOException; + + /** + * Whether the manager is able to fully return group metadata + * + * @return whether the manager is in online mode + */ + boolean isOnline(); + + /** + * Move servers and tables to a new group. + * @param servers list of servers, must be part of the same group + * @param tables set of tables to move + * @param srcGroup groupName being moved from + * @param dstGroup groupName being moved to + */ + void moveServersAndTables(Set
servers, Set tables, + String srcGroup, String dstGroup) throws IOException; +} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java new file mode 100644 index 00000000000..eec03ce80a5 --- /dev/null +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java @@ -0,0 +1,795 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rsgroup; + +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +import com.google.common.collect.Sets; +import com.google.protobuf.ServiceException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; +import java.util.Set; +import java.util.TreeSet; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.Coprocessor; +import org.apache.hadoop.hbase.DoNotRetryIOException; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.TableStateManager; +import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint; +import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.ServerListener; +import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure; +import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch; +import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import 
org.apache.hadoop.hbase.protobuf.RequestConverter; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; +import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; +import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest; +import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy; +import org.apache.hadoop.hbase.security.access.AccessControlLists; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.ModifyRegionUtils; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; + +/** + * This is an implementation of {@link RSGroupInfoManager}. Which makes + * use of an HBase table as the persistence store for the group information. + * It also makes use of zookeeper to store group information needed + * for bootstrapping during offline mode. + */ +public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListener { + private static final Log LOG = LogFactory.getLog(RSGroupInfoManagerImpl.class); + + /** Table descriptor for hbase:rsgroup catalog table */ + private final static HTableDescriptor RSGROUP_TABLE_DESC; + static { + RSGROUP_TABLE_DESC = new HTableDescriptor(RSGROUP_TABLE_NAME); + RSGROUP_TABLE_DESC.addFamily(new HColumnDescriptor(META_FAMILY_BYTES)); + RSGROUP_TABLE_DESC.setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName()); + try { + RSGROUP_TABLE_DESC.addCoprocessor( + MultiRowMutationEndpoint.class.getName(), + null, Coprocessor.PRIORITY_SYSTEM, null); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + } + + private volatile Map rsGroupMap; + private volatile Map tableMap; + private MasterServices master; + private ClusterConnection conn; + private ZooKeeperWatcher watcher; + private RSGroupStartupWorker rsGroupStartupWorker; + // contains list of groups that were last flushed to persistent store + private volatile Set prevRSGroups; + private RSGroupSerDe rsGroupSerDe; + private DefaultServerUpdater defaultServerUpdater; + private boolean isInit = false; + + public RSGroupInfoManagerImpl(MasterServices master) throws IOException { + this.rsGroupMap = Collections.emptyMap(); + this.tableMap = Collections.emptyMap(); + rsGroupSerDe = new RSGroupSerDe(); + this.master = master; + this.watcher = master.getZooKeeper(); + this.conn = master.getConnection(); + prevRSGroups = new HashSet(); + } + + public void init() throws IOException{ + rsGroupStartupWorker = new RSGroupStartupWorker(this, master, conn); + refresh(); + rsGroupStartupWorker.start(); + defaultServerUpdater = new DefaultServerUpdater(this); + master.getServerManager().registerListener(this); + defaultServerUpdater.start(); + isInit = true; + } + + boolean isInit() { + return isInit; + } + + /** + * Adds the group. 
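+   * <p>For example, {@code addRSGroup(new RSGroupInfo("app1"))} creates an empty group
+   * named "app1" (the name is illustrative); the group must not already exist and must
+   * not be named "default".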
+ * + * @param rsGroupInfo the group information to add + */ + @Override + public synchronized void addRSGroup(RSGroupInfo rsGroupInfo) throws IOException { + checkGroupName(rsGroupInfo.getName()); + if (rsGroupMap.get(rsGroupInfo.getName()) != null || + rsGroupInfo.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { + throw new DoNotRetryIOException("Group already exists: "+ rsGroupInfo.getName()); + } + Map newGroupMap = Maps.newHashMap(rsGroupMap); + newGroupMap.put(rsGroupInfo.getName(), rsGroupInfo); + flushConfig(newGroupMap); + } + + @Override + public synchronized Set
moveServers(Set
servers, String srcGroup, + String dstGroup) throws IOException { + if (servers == null) { + throw new ConstraintException("The list of servers to move cannot be null."); + } + Set
movedServers = Sets.newHashSet(); + if (!rsGroupMap.containsKey(srcGroup)) { + throw new DoNotRetryIOException("Group "+srcGroup+" does not exist"); + } + if (!rsGroupMap.containsKey(dstGroup)) { + throw new DoNotRetryIOException("Group "+dstGroup+" does not exist"); + } + + RSGroupInfo src = new RSGroupInfo(getRSGroup(srcGroup)); + RSGroupInfo dst = new RSGroupInfo(getRSGroup(dstGroup)); + for(Address el: servers) { + if (src.removeServer(el)) { + movedServers.add(el); + } + dst.addServer(el); + } + + Map newGroupMap = Maps.newHashMap(rsGroupMap); + newGroupMap.put(src.getName(), src); + newGroupMap.put(dst.getName(), dst); + + flushConfig(newGroupMap); + return movedServers; + } + + /** + * Gets the group info of server. + * + * @param server the server + * @return An instance of GroupInfo. + */ + @Override + public RSGroupInfo getRSGroupOfServer(Address server) throws IOException { + for (RSGroupInfo info : rsGroupMap.values()) { + if (info.containsServer(server)){ + return info; + } + } + return null; + } + + /** + * Gets the group information. + * + * @param groupName + * the group name + * @return An instance of GroupInfo + */ + @Override + public RSGroupInfo getRSGroup(String groupName) throws IOException { + RSGroupInfo RSGroupInfo = rsGroupMap.get(groupName); + return RSGroupInfo; + } + + + + @Override + public String getRSGroupOfTable(TableName tableName) throws IOException { + return tableMap.get(tableName); + } + + @Override + public synchronized void moveTables( + Set tableNames, String groupName) throws IOException { + if (groupName != null && !rsGroupMap.containsKey(groupName)) { + throw new DoNotRetryIOException("Group "+groupName+" does not exist or is a special group"); + } + + Map newGroupMap = Maps.newHashMap(rsGroupMap); + for(TableName tableName: tableNames) { + if (tableMap.containsKey(tableName)) { + RSGroupInfo src = new RSGroupInfo(newGroupMap.get(tableMap.get(tableName))); + src.removeTable(tableName); + newGroupMap.put(src.getName(), src); + } + if(groupName != null) { + RSGroupInfo dst = new RSGroupInfo(newGroupMap.get(groupName)); + dst.addTable(tableName); + newGroupMap.put(dst.getName(), dst); + } + } + + flushConfig(newGroupMap); + } + + + /** + * Delete a region server group. + * + * @param groupName the group name + * @throws java.io.IOException Signals that an I/O exception has occurred. 
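+   * <p>Illustrative call: {@code removeRSGroup("app1")}. The default group is reserved
+   * and cannot be removed.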
+ */ + @Override + public synchronized void removeRSGroup(String groupName) throws IOException { + if (!rsGroupMap.containsKey(groupName) || groupName.equals(RSGroupInfo.DEFAULT_GROUP)) { + throw new DoNotRetryIOException("Group "+groupName+" does not exist or is a reserved group"); + } + Map newGroupMap = Maps.newHashMap(rsGroupMap); + newGroupMap.remove(groupName); + flushConfig(newGroupMap); + } + + @Override + public List listRSGroups() throws IOException { + List list = Lists.newLinkedList(rsGroupMap.values()); + return list; + } + + @Override + public boolean isOnline() { + return rsGroupStartupWorker.isOnline(); + } + + @Override + public synchronized void refresh() throws IOException { + refresh(false); + } + + private synchronized void refresh(boolean forceOnline) throws IOException { + List groupList = new LinkedList(); + + // overwrite anything read from zk, group table is source of truth + // if online read from GROUP table + if (forceOnline || isOnline()) { + LOG.debug("Refreshing in Online mode."); + try (Table rsGroupTable = conn.getTable(RSGROUP_TABLE_NAME)) { + groupList.addAll(rsGroupSerDe.retrieveGroupList(rsGroupTable)); + } + } else { + LOG.debug("Refreshing in Offline mode."); + String groupBasePath = ZKUtil.joinZNode(watcher.baseZNode, rsGroupZNode); + groupList.addAll(rsGroupSerDe.retrieveGroupList(watcher, groupBasePath)); + } + + // refresh default group, prune + NavigableSet orphanTables = new TreeSet(); + for(String entry: master.getTableDescriptors().getAll().keySet()) { + orphanTables.add(TableName.valueOf(entry)); + } + + List specialTables; + if(!master.isInitialized()) { + specialTables = new ArrayList(); + specialTables.add(AccessControlLists.ACL_TABLE_NAME); + specialTables.add(TableName.META_TABLE_NAME); + specialTables.add(TableName.NAMESPACE_TABLE_NAME); + specialTables.add(RSGROUP_TABLE_NAME); + } else { + specialTables = + master.listTableNamesByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR); + } + + for(TableName table : specialTables) { + orphanTables.add(table); + } + for(RSGroupInfo group: groupList) { + if(!group.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { + orphanTables.removeAll(group.getTables()); + } + } + + // The default group is added last so that it overwrites any default group + // loaded from the group table or zk + groupList.add(new RSGroupInfo(RSGroupInfo.DEFAULT_GROUP, + Sets.newHashSet(getDefaultServers()), + orphanTables)); + + + // populate the data + HashMap newGroupMap = Maps.newHashMap(); + HashMap newTableMap = Maps.newHashMap(); + for (RSGroupInfo group : groupList) { + newGroupMap.put(group.getName(), group); + for(TableName table: group.getTables()) { + newTableMap.put(table, group.getName()); + } + } + rsGroupMap = Collections.unmodifiableMap(newGroupMap); + tableMap = Collections.unmodifiableMap(newTableMap); + + prevRSGroups.clear(); + prevRSGroups.addAll(rsGroupMap.keySet()); + } + + private synchronized Map flushConfigTable(Map newGroupMap) + throws IOException { + Map newTableMap = Maps.newHashMap(); + List mutations = Lists.newArrayList(); + + // populate deletes + for(String groupName : prevRSGroups) { + if(!newGroupMap.containsKey(groupName)) { + Delete d = new Delete(Bytes.toBytes(groupName)); + mutations.add(d); + } + } + + // populate puts + for(RSGroupInfo RSGroupInfo : newGroupMap.values()) { + RSGroupProtos.RSGroupInfo proto = RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo); + Put p = new Put(Bytes.toBytes(RSGroupInfo.getName())); + p.addColumn(META_FAMILY_BYTES,
META_QUALIFIER_BYTES, + proto.toByteArray()); + mutations.add(p); + for(TableName entry: RSGroupInfo.getTables()) { + newTableMap.put(entry, RSGroupInfo.getName()); + } + } + + if(mutations.size() > 0) { + multiMutate(mutations); + } + return newTableMap; + } + + private synchronized void flushConfig(Map newGroupMap) throws IOException { + Map newTableMap; + + // For offline mode persistence is still unavailable + // We're refreshing in-memory state but only for default servers + if (!isOnline()) { + Map m = Maps.newHashMap(rsGroupMap); + RSGroupInfo oldDefaultGroup = m.remove(RSGroupInfo.DEFAULT_GROUP); + RSGroupInfo newDefaultGroup = newGroupMap.remove(RSGroupInfo.DEFAULT_GROUP); + if (!m.equals(newGroupMap) || + !oldDefaultGroup.getTables().equals(newDefaultGroup.getTables())) { + throw new IOException("Only default servers can be updated during offline mode"); + } + newGroupMap.put(RSGroupInfo.DEFAULT_GROUP, newDefaultGroup); + rsGroupMap = newGroupMap; + return; + } + + newTableMap = flushConfigTable(newGroupMap); + + // make changes visible since it has been + // persisted in the source of truth + rsGroupMap = Collections.unmodifiableMap(newGroupMap); + tableMap = Collections.unmodifiableMap(newTableMap); + + + try { + String groupBasePath = ZKUtil.joinZNode(watcher.baseZNode, rsGroupZNode); + ZKUtil.createAndFailSilent(watcher, groupBasePath, ProtobufUtil.PB_MAGIC); + + List zkOps = new ArrayList(newGroupMap.size()); + for(String groupName : prevRSGroups) { + if(!newGroupMap.containsKey(groupName)) { + String znode = ZKUtil.joinZNode(groupBasePath, groupName); + zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode)); + } + } + + + for(RSGroupInfo RSGroupInfo : newGroupMap.values()) { + String znode = ZKUtil.joinZNode(groupBasePath, RSGroupInfo.getName()); + RSGroupProtos.RSGroupInfo proto = RSGroupProtobufUtil.toProtoGroupInfo(RSGroupInfo); + LOG.debug("Updating znode: "+znode); + ZKUtil.createAndFailSilent(watcher, znode); + zkOps.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(znode)); + zkOps.add(ZKUtil.ZKUtilOp.createAndFailSilent(znode, + ProtobufUtil.prependPBMagic(proto.toByteArray()))); + } + LOG.debug("Writing ZK GroupInfo count: " + zkOps.size()); + + ZKUtil.multiOrSequential(watcher, zkOps, false); + } catch (KeeperException e) { + LOG.error("Failed to write to rsGroupZNode", e); + master.abort("Failed to write to rsGroupZNode", e); + throw new IOException("Failed to write to rsGroupZNode",e); + } + + prevRSGroups.clear(); + prevRSGroups.addAll(newGroupMap.keySet()); + } + + private List getOnlineRS() throws IOException { + if (master != null) { + return master.getServerManager().getOnlineServersList(); + } + try { + LOG.debug("Reading online RS from zookeeper"); + List servers = new LinkedList(); + for (String el: ZKUtil.listChildrenNoWatch(watcher, watcher.rsZNode)) { + servers.add(ServerName.parseServerName(el)); + } + return servers; + } catch (KeeperException e) { + throw new IOException("Failed to retrieve server list from zookeeper", e); + } + } + + private List
<Address> getDefaultServers() throws IOException { + List<Address> defaultServers = new LinkedList<Address>(); + for(ServerName server : getOnlineRS()) { + Address address = Address.fromParts(server.getHostname(), server.getPort()); + boolean found = false; + for(RSGroupInfo info : rsGroupMap.values()) { + if(!RSGroupInfo.DEFAULT_GROUP.equals(info.getName()) && + info.containsServer(address)) { + found = true; + break; + } + } + if(!found) { + defaultServers.add(address); + } + } + return defaultServers; + } + + private synchronized void updateDefaultServers( + Set<Address>
server) throws IOException { + RSGroupInfo info = rsGroupMap.get(RSGroupInfo.DEFAULT_GROUP); + RSGroupInfo newInfo = new RSGroupInfo(info.getName(), server, info.getTables()); + HashMap<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap); + newGroupMap.put(newInfo.getName(), newInfo); + flushConfig(newGroupMap); + } + + @Override + public void serverAdded(ServerName serverName) { + defaultServerUpdater.serverChanged(); + } + + @Override + public void serverRemoved(ServerName serverName) { + defaultServerUpdater.serverChanged(); + } + + private static class DefaultServerUpdater extends Thread { + private static final Log LOG = LogFactory.getLog(DefaultServerUpdater.class); + private RSGroupInfoManagerImpl mgr; + private boolean hasChanged = false; + + public DefaultServerUpdater(RSGroupInfoManagerImpl mgr) { + this.mgr = mgr; + } + + @Override + public void run() { + List<Address>
prevDefaultServers = new LinkedList<Address>(); + while(!mgr.master.isAborted() || !mgr.master.isStopped()) { + try { + LOG.info("Updating default servers."); + List<Address>
servers = mgr.getDefaultServers(); + Collections.sort(servers, new Comparator<Address>
() { + @Override + public int compare(Address o1, Address o2) { + int diff = o1.getHostname().compareTo(o2.getHostname()); + if (diff != 0) { + return diff; + } + return o1.getPort() - o2.getPort(); + } + }); + if(!servers.equals(prevDefaultServers)) { + mgr.updateDefaultServers(Sets.<Address>
newHashSet(servers)); + prevDefaultServers = servers; + LOG.info("Updated with servers: "+servers.size()); + } + try { + synchronized (this) { + if(!hasChanged) { + wait(); + } + hasChanged = false; + } + } catch (InterruptedException e) { + } + } catch (IOException e) { + LOG.warn("Failed to update default servers", e); + } + } + } + + public void serverChanged() { + synchronized (this) { + hasChanged = true; + this.notify(); + } + } + } + + @Override + public void waiting() { + + } + + private static class RSGroupStartupWorker extends Thread { + private static final Log LOG = LogFactory.getLog(RSGroupStartupWorker.class); + + private volatile boolean isOnline = false; + private MasterServices masterServices; + private RSGroupInfoManagerImpl groupInfoManager; + private ClusterConnection conn; + + public RSGroupStartupWorker(RSGroupInfoManagerImpl groupInfoManager, + MasterServices masterServices, + ClusterConnection conn) { + this.masterServices = masterServices; + this.groupInfoManager = groupInfoManager; + this.conn = conn; + setName(RSGroupStartupWorker.class.getName()+"-"+masterServices.getServerName()); + setDaemon(true); + } + + @Override + public void run() { + if(waitForGroupTableOnline()) { + LOG.info("GroupBasedLoadBalancer is now online"); + } + } + + public boolean waitForGroupTableOnline() { + final List foundRegions = new LinkedList(); + final List assignedRegions = new LinkedList(); + final AtomicBoolean found = new AtomicBoolean(false); + final TableStateManager tsm = + masterServices.getAssignmentManager().getTableStateManager(); + boolean createSent = false; + while (!found.get() && isMasterRunning()) { + foundRegions.clear(); + assignedRegions.clear(); + found.set(true); + try { + boolean rootMetaFound = + masterServices.getMetaTableLocator().verifyMetaRegionLocation( + conn, + masterServices.getZooKeeper(), + 1); + final AtomicBoolean nsFound = new AtomicBoolean(false); + if (rootMetaFound) { + + MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() { + @Override + public boolean visit(Result row) throws IOException { + + HRegionInfo info = MetaTableAccessor.getHRegionInfo(row); + if (info != null) { + Cell serverCell = + row.getColumnLatestCell(HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER); + if (RSGROUP_TABLE_NAME.equals(info.getTable()) && serverCell != null) { + ServerName sn = + ServerName.parseVersionedServerName(CellUtil.cloneValue(serverCell)); + if (sn == null) { + found.set(false); + } else if (tsm.isTableState(RSGROUP_TABLE_NAME, + ZooKeeperProtos.Table.State.ENABLED)) { + try { + ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn); + ClientProtos.GetRequest request = + RequestConverter.buildGetRequest(info.getRegionName(), + new Get(ROW_KEY)); + rs.get(null, request); + assignedRegions.add(info); + } catch(Exception ex) { + LOG.debug("Caught exception while verifying group region", ex); + } + } + foundRegions.add(info); + } + if (TableName.NAMESPACE_TABLE_NAME.equals(info.getTable())) { + Cell cell = row.getColumnLatestCell(HConstants.CATALOG_FAMILY, + HConstants.SERVER_QUALIFIER); + ServerName sn = null; + if(cell != null) { + sn = ServerName.parseVersionedServerName(CellUtil.cloneValue(cell)); + } + if (tsm.isTableState(TableName.NAMESPACE_TABLE_NAME, + ZooKeeperProtos.Table.State.ENABLED)) { + try { + ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn); + ClientProtos.GetRequest request = + RequestConverter.buildGetRequest(info.getRegionName(), + new Get(ROW_KEY)); + rs.get(null, request); + 
nsFound.set(true); + } catch(Exception ex) { + LOG.debug("Caught exception while verifying group region", ex); + } + } + } + } + return true; + } + }; + MetaTableAccessor.fullScan(conn, visitor); + // if no regions in meta then we have to create the table + if (foundRegions.size() < 1 && rootMetaFound && !createSent && nsFound.get()) { + groupInfoManager.createGroupTable(masterServices); + createSent = true; + } + LOG.info("Group table: " + RSGROUP_TABLE_NAME + " isOnline: " + found.get() + + ", regionCount: " + foundRegions.size() + ", assignCount: " + + assignedRegions.size() + ", rootMetaFound: "+rootMetaFound); + found.set(found.get() && assignedRegions.size() == foundRegions.size() + && foundRegions.size() > 0); + } else { + LOG.info("Waiting for catalog tables to come online"); + found.set(false); + } + if (found.get()) { + LOG.debug("With group table online, refreshing cached information."); + groupInfoManager.refresh(true); + isOnline = true; + //flush any inconsistencies between ZK and HTable + groupInfoManager.flushConfig(groupInfoManager.rsGroupMap); + } + } catch (RuntimeException e) { + throw e; + } catch(Exception e) { + found.set(false); + LOG.warn("Failed to perform check", e); + } + try { + Thread.sleep(100); + } catch (InterruptedException e) { + LOG.info("Sleep interrupted", e); + } + } + return found.get(); + } + + public boolean isOnline() { + return isOnline; + } + + private boolean isMasterRunning() { + return !masterServices.isAborted() && !masterServices.isStopped(); + } + } + + private void createGroupTable(MasterServices masterServices) throws IOException { + HRegionInfo[] newRegions = + ModifyRegionUtils.createHRegionInfos(RSGROUP_TABLE_DESC, null); + ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(); + masterServices.getMasterProcedureExecutor().submitProcedure( + new CreateTableProcedure( + masterServices.getMasterProcedureExecutor().getEnvironment(), + RSGROUP_TABLE_DESC, + newRegions, + latch)); + latch.await(); + // wait for region to be online + int tries = 600; + while(masterServices.getAssignmentManager().getRegionStates() + .getRegionServerOfRegion(newRegions[0]) == null && tries > 0) { + try { + Thread.sleep(100); + } catch (InterruptedException e) { + throw new IOException("Wait interrupted", e); + } + tries--; + } + if(tries <= 0) { + throw new IOException("Failed to create group table."); + } + } + + private void multiMutate(List mutations) + throws IOException { + MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder(); + for (Mutation mutation : mutations) { + if (mutation instanceof Put) { + mrmBuilder.addMutationRequest(ProtobufUtil.toMutation( + ClientProtos.MutationProto.MutationType.PUT, mutation)); + } else if (mutation instanceof Delete) { + mrmBuilder.addMutationRequest(ProtobufUtil.toMutation( + ClientProtos.MutationProto.MutationType.DELETE, mutation)); + } else { + throw new DoNotRetryIOException("multiMutate doesn't support " + + mutation.getClass().getName()); + } + } + MutateRowsRequest mrm = mrmBuilder.build(); + // Be robust against movement of the rsgroup table + // TODO: Why is this necessary sometimes? Should we be using our own connection? 
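+ // Clearing the cached location forces a fresh hbase:meta lookup, so the RPC below + // goes to whichever server currently hosts the rsgroup region even if it has + // moved since the location was first cached.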
+ conn.clearRegionCache(RSGROUP_TABLE_NAME); + try (Table rsGroupTable = conn.getTable(RSGROUP_TABLE_NAME)) { + CoprocessorRpcChannel channel = rsGroupTable.coprocessorService(ROW_KEY); + MultiRowMutationProtos.MultiRowMutationService.BlockingInterface service = + MultiRowMutationProtos.MultiRowMutationService.newBlockingStub(channel); + try { + service.mutateRows(null, mrm); + } catch (ServiceException ex) { + ProtobufUtil.toIOException(ex); + } + } + } + + private void checkGroupName(String groupName) throws ConstraintException { + if(!groupName.matches("[a-zA-Z0-9_]+")) { + throw new ConstraintException("Group name should only contain alphanumeric characters"); + } + } + + @Override + public void moveServersAndTables(Set
<Address> servers, Set<TableName> tables, String srcGroup, + String dstGroup) throws IOException { + //get server's group + RSGroupInfo srcGroupInfo = getRSGroup(srcGroup); + RSGroupInfo dstGroupInfo = getRSGroup(dstGroup); + + //move servers + for (Address el: servers) { + srcGroupInfo.removeServer(el); + dstGroupInfo.addServer(el); + } + //move tables + for(TableName tableName: tables) { + srcGroupInfo.removeTable(tableName); + dstGroupInfo.addTable(tableName); + } + + //flush changed groupinfo + Map<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap); + newGroupMap.put(srcGroupInfo.getName(), srcGroupInfo); + newGroupMap.put(dstGroupInfo.getName(), dstGroupInfo); + flushConfig(newGroupMap); + } +} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupProtobufUtil.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupProtobufUtil.java new file mode 100644 index 00000000000..0874210f59f --- /dev/null +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupProtobufUtil.java @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hbase.rsgroup; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; +import org.apache.hadoop.hbase.protobuf.generated.TableProtos; + +@InterfaceAudience.Private +class RSGroupProtobufUtil { + static RSGroupInfo toGroupInfo(RSGroupProtos.RSGroupInfo proto) { + RSGroupInfo RSGroupInfo = new RSGroupInfo(proto.getName()); + for(HBaseProtos.ServerName el: proto.getServersList()) { + RSGroupInfo.addServer(Address.fromParts(el.getHostName(), el.getPort())); + } + for(TableProtos.TableName pTableName: proto.getTablesList()) { + RSGroupInfo.addTable(ProtobufUtil.toTableName(pTableName)); + } + return RSGroupInfo; + } + + static RSGroupProtos.RSGroupInfo toProtoGroupInfo(RSGroupInfo pojo) { + List tables = new ArrayList<>(pojo.getTables().size()); + for(TableName arg: pojo.getTables()) { + tables.add(ProtobufUtil.toProtoTableName(arg)); + } + List servers = new ArrayList<>(pojo.getServers().size()); + for(Address el: pojo.getServers()) { + servers.add(HBaseProtos.ServerName.newBuilder() + .setHostName(el.getHostname()) + .setPort(el.getPort()) + .build()); + } + return RSGroupProtos.RSGroupInfo.newBuilder().setName(pojo.getName()) + .addAllServers(servers) + .addAllTables(tables).build(); + } +} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupSerDe.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupSerDe.java new file mode 100644 index 00000000000..642cb4a03b7 --- /dev/null +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupSerDe.java @@ -0,0 +1,88 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.rsgroup; + +import com.google.common.collect.Lists; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; + +//TODO do better encapsulation of SerDe logic from GroupInfoManager and GroupTracker +public class RSGroupSerDe { + private static final Log LOG = LogFactory.getLog(RSGroupSerDe.class); + + public RSGroupSerDe() { + + } + + public List retrieveGroupList(Table groupTable) throws IOException { + List RSGroupInfoList = Lists.newArrayList(); + for (Result result : groupTable.getScanner(new Scan())) { + RSGroupProtos.RSGroupInfo proto = + RSGroupProtos.RSGroupInfo.parseFrom( + result.getValue( + RSGroupInfoManager.META_FAMILY_BYTES, + RSGroupInfoManager.META_QUALIFIER_BYTES)); + RSGroupInfoList.add(RSGroupProtobufUtil.toGroupInfo(proto)); + } + return RSGroupInfoList; + } + + public List retrieveGroupList(ZooKeeperWatcher watcher, + String groupBasePath) throws IOException { + List RSGroupInfoList = Lists.newArrayList(); + //Overwrite any info stored by table, this takes precedence + try { + if(ZKUtil.checkExists(watcher, groupBasePath) != -1) { + for(String znode: ZKUtil.listChildrenAndWatchForNewChildren(watcher, groupBasePath)) { + byte[] data = ZKUtil.getData(watcher, ZKUtil.joinZNode(groupBasePath, znode)); + if(data.length > 0) { + ProtobufUtil.expectPBMagicPrefix(data); + ByteArrayInputStream bis = new ByteArrayInputStream( + data, ProtobufUtil.lengthOfPBMagic(), data.length); + RSGroupInfoList.add(RSGroupProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis))); + } + } + LOG.debug("Read ZK GroupInfo count:" + RSGroupInfoList.size()); + } + } catch (KeeperException e) { + throw new IOException("Failed to read rsGroupZNode",e); + } catch (DeserializationException e) { + throw new IOException("Failed to read rsGroupZNode",e); + } catch (InterruptedException e) { + throw new IOException("Failed to read rsGroupZNode",e); + } + return RSGroupInfoList; + } +} diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java new file mode 100644 index 00000000000..6c791a14352 --- /dev/null +++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupableBalancer.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.rsgroup; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.master.LoadBalancer; + +/** + * Marker Interface. RSGroups feature will check for a LoadBalancer + * marked with this Interface before it runs. + */ +@InterfaceAudience.Private +public interface RSGroupableBalancer extends LoadBalancer { + /** Config for pluggable load balancers */ + String HBASE_RSGROUP_LOADBALANCER_CLASS = "hbase.rsgroup.grouploadbalancer.class"; +} diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java new file mode 100644 index 00000000000..3b96de6da68 --- /dev/null +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancer.java @@ -0,0 +1,573 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.balancer; + +import com.google.common.collect.ArrayListMultimap; +import com.google.common.collect.Lists; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfo; +import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager; +import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.RegionPlan; +import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +//TODO use stochastic based load balancer instead +@Category(SmallTests.class) +public class TestRSGroupBasedLoadBalancer { + + private static final Log LOG = LogFactory.getLog(TestRSGroupBasedLoadBalancer.class); + private static RSGroupBasedLoadBalancer loadBalancer; + private static SecureRandom rand; + + static String[] groups = new String[] { RSGroupInfo.DEFAULT_GROUP, "dg2", "dg3", + "dg4" }; + static TableName[] tables = + new TableName[] { TableName.valueOf("dt1"), + TableName.valueOf("dt2"), + TableName.valueOf("dt3"), + TableName.valueOf("dt4")}; + static List servers; + static Map groupMap; + static Map tableMap; + static List tableDescs; + int[] regionAssignment = new int[] { 2, 5, 7, 10, 4, 3, 1 }; + static int regionId = 0; + + @BeforeClass + public static void beforeAllTests() throws Exception { + rand = new SecureRandom(); + servers = generateServers(7); + groupMap = constructGroupInfo(servers, groups); + tableMap = new HashMap(); + tableDescs = constructTableDesc(); + Configuration conf = HBaseConfiguration.create(); + conf.set("hbase.regions.slop", "0"); + conf.set("hbase.group.grouploadbalancer.class", SimpleLoadBalancer.class.getCanonicalName()); + loadBalancer = new RSGroupBasedLoadBalancer(getMockedGroupInfoManager()); + loadBalancer.setMasterServices(getMockedMaster()); + loadBalancer.setConf(conf); + loadBalancer.initialize(); + } + + /** + * Test the load balancing algorithm. 
+ * + * Invariant is that all servers of the group should be hosting either floor(average) or + * ceiling(average) + * + * @throws Exception + */ + @Test + public void testBalanceCluster() throws Exception { + Map> servers = mockClusterServers(); + ArrayListMultimap list = convertToGroupBasedMap(servers); + LOG.info("Mock Cluster : " + printStats(list)); + List plans = loadBalancer.balanceCluster(servers); + ArrayListMultimap balancedCluster = reconcile( + list, plans); + LOG.info("Mock Balance : " + printStats(balancedCluster)); + assertClusterAsBalanced(balancedCluster); + } + + /** + * Invariant is that all servers of a group have load between floor(avg) and + * ceiling(avg) number of regions. + */ + private void assertClusterAsBalanced( + ArrayListMultimap groupLoadMap) { + for (String gName : groupLoadMap.keySet()) { + List groupLoad = groupLoadMap.get(gName); + int numServers = groupLoad.size(); + int numRegions = 0; + int maxRegions = 0; + int minRegions = Integer.MAX_VALUE; + for (ServerAndLoad server : groupLoad) { + int nr = server.getLoad(); + if (nr > maxRegions) { + maxRegions = nr; + } + if (nr < minRegions) { + minRegions = nr; + } + numRegions += nr; + } + if (maxRegions - minRegions < 2) { + // less than 2 between max and min, can't balance + return; + } + int min = numRegions / numServers; + int max = numRegions % numServers == 0 ? min : min + 1; + + for (ServerAndLoad server : groupLoad) { + assertTrue(server.getLoad() <= max); + assertTrue(server.getLoad() >= min); + } + } + } + + /** + * All regions have an assignment. + * + * @param regions + * @param servers + * @param assignments + * @throws java.io.IOException + * @throws java.io.FileNotFoundException + */ + private void assertImmediateAssignment(List regions, + List servers, + Map assignments) + throws IOException { + for (HRegionInfo region : regions) { + assertTrue(assignments.containsKey(region)); + ServerName server = assignments.get(region); + TableName tableName = region.getTable(); + + String groupName = + getMockedGroupInfoManager().getRSGroupOfTable(tableName); + assertTrue(StringUtils.isNotEmpty(groupName)); + RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup(groupName); + assertTrue("Region is not correctly assigned to group servers.", + gInfo.containsServer(server.getAddress())); + } + } + + /** + * Tests the bulk assignment used during cluster startup. + * + * Round-robin. Should yield a balanced cluster so same invariant as the + * load balancer holds, all servers holding either floor(avg) or + * ceiling(avg). 
+ * + * @throws Exception + */ + @Test + public void testBulkAssignment() throws Exception { + List regions = randomRegions(25); + Map> assignments = loadBalancer + .roundRobinAssignment(regions, servers); + //test empty region/servers scenario + //this should not throw an NPE + loadBalancer.roundRobinAssignment(regions, Collections.emptyList()); + //test regular scenario + assertTrue(assignments.keySet().size() == servers.size()); + for (ServerName sn : assignments.keySet()) { + List regionAssigned = assignments.get(sn); + for (HRegionInfo region : regionAssigned) { + TableName tableName = region.getTable(); + String groupName = + getMockedGroupInfoManager().getRSGroupOfTable(tableName); + assertTrue(StringUtils.isNotEmpty(groupName)); + RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup( + groupName); + assertTrue( + "Region is not correctly assigned to group servers.", + gInfo.containsServer(sn.getAddress())); + } + } + ArrayListMultimap loadMap = convertToGroupBasedMap(assignments); + assertClusterAsBalanced(loadMap); + } + + /** + * Test the cluster startup bulk assignment which attempts to retain + * assignment info. + * + * @throws Exception + */ + @Test + public void testRetainAssignment() throws Exception { + // Test simple case where all same servers are there + Map> currentAssignments = mockClusterServers(); + Map inputForTest = new HashMap(); + for (ServerName sn : currentAssignments.keySet()) { + for (HRegionInfo region : currentAssignments.get(sn)) { + inputForTest.put(region, sn); + } + } + //verify region->null server assignment is handled + inputForTest.put(randomRegions(1).get(0), null); + Map> newAssignment = loadBalancer + .retainAssignment(inputForTest, servers); + assertRetainedAssignment(inputForTest, servers, newAssignment); + } + + /** + * Asserts a valid retained assignment plan. + *
<p> + * Must meet the following conditions: + * <ul> + * <li>Every input region has an assignment, and to an online server + * <li>If a region had an existing assignment to a server with the same + * address as a currently online server, it will be assigned to it + * </ul>
+ * + * @param existing + * @param assignment + * @throws java.io.IOException + * @throws java.io.FileNotFoundException + */ + private void assertRetainedAssignment( + Map existing, List servers, + Map> assignment) + throws FileNotFoundException, IOException { + // Verify condition 1, every region assigned, and to online server + Set onlineServerSet = new TreeSet(servers); + Set assignedRegions = new TreeSet(); + for (Map.Entry> a : assignment.entrySet()) { + assertTrue( + "Region assigned to server that was not listed as online", + onlineServerSet.contains(a.getKey())); + for (HRegionInfo r : a.getValue()) + assignedRegions.add(r); + } + assertEquals(existing.size(), assignedRegions.size()); + + // Verify condition 2, every region must be assigned to correct server. + Set onlineHostNames = new TreeSet(); + for (ServerName s : servers) { + onlineHostNames.add(s.getHostname()); + } + + for (Map.Entry> a : assignment.entrySet()) { + ServerName currentServer = a.getKey(); + for (HRegionInfo r : a.getValue()) { + ServerName oldAssignedServer = existing.get(r); + TableName tableName = r.getTable(); + String groupName = + getMockedGroupInfoManager().getRSGroupOfTable(tableName); + assertTrue(StringUtils.isNotEmpty(groupName)); + RSGroupInfo gInfo = getMockedGroupInfoManager().getRSGroup( + groupName); + assertTrue( + "Region is not correctly assigned to group servers.", + gInfo.containsServer(currentServer.getAddress())); + if (oldAssignedServer != null + && onlineHostNames.contains(oldAssignedServer + .getHostname())) { + // this region was previously assigned somewhere, and that + // host is still around, then the host must have been is a + // different group. + if (!oldAssignedServer.getAddress().equals(currentServer.getAddress())) { + assertFalse(gInfo.containsServer(oldAssignedServer.getAddress())); + } + } + } + } + } + + private String printStats( + ArrayListMultimap groupBasedLoad) { + StringBuffer sb = new StringBuffer(); + sb.append("\n"); + for (String groupName : groupBasedLoad.keySet()) { + sb.append("Stats for group: " + groupName); + sb.append("\n"); + sb.append(groupMap.get(groupName).getServers()); + sb.append("\n"); + List groupLoad = groupBasedLoad.get(groupName); + int numServers = groupLoad.size(); + int totalRegions = 0; + sb.append("Per Server Load: \n"); + for (ServerAndLoad sLoad : groupLoad) { + sb.append("Server :" + sLoad.getServerName() + " Load : " + + sLoad.getLoad() + "\n"); + totalRegions += sLoad.getLoad(); + } + sb.append(" Group Statistics : \n"); + float average = (float) totalRegions / numServers; + int max = (int) Math.ceil(average); + int min = (int) Math.floor(average); + sb.append("[srvr=" + numServers + " rgns=" + totalRegions + " avg=" + + average + " max=" + max + " min=" + min + "]"); + sb.append("\n"); + sb.append("==============================="); + sb.append("\n"); + } + return sb.toString(); + } + + private ArrayListMultimap convertToGroupBasedMap( + final Map> serversMap) throws IOException { + ArrayListMultimap loadMap = ArrayListMultimap + .create(); + for (RSGroupInfo gInfo : getMockedGroupInfoManager().listRSGroups()) { + Set
groupServers = gInfo.getServers(); + for (Address server : groupServers) { + ServerName actual = null; + for(ServerName entry: servers) { + if(entry.getAddress().equals(server)) { + actual = entry; + break; + } + } + List regions = serversMap.get(actual); + assertTrue("No load for " + actual, regions != null); + loadMap.put(gInfo.getName(), + new ServerAndLoad(actual, regions.size())); + } + } + return loadMap; + } + + private ArrayListMultimap reconcile( + ArrayListMultimap previousLoad, + List plans) { + ArrayListMultimap result = ArrayListMultimap + .create(); + result.putAll(previousLoad); + if (plans != null) { + for (RegionPlan plan : plans) { + ServerName source = plan.getSource(); + updateLoad(result, source, -1); + ServerName destination = plan.getDestination(); + updateLoad(result, destination, +1); + } + } + return result; + } + + private void updateLoad( + ArrayListMultimap previousLoad, + final ServerName sn, final int diff) { + for (String groupName : previousLoad.keySet()) { + ServerAndLoad newSAL = null; + ServerAndLoad oldSAL = null; + for (ServerAndLoad sal : previousLoad.get(groupName)) { + if (ServerName.isSameAddress(sn, sal.getServerName())) { + oldSAL = sal; + newSAL = new ServerAndLoad(sn, sal.getLoad() + diff); + break; + } + } + if (newSAL != null) { + previousLoad.remove(groupName, oldSAL); + previousLoad.put(groupName, newSAL); + break; + } + } + } + + private Map> mockClusterServers() throws IOException { + assertTrue(servers.size() == regionAssignment.length); + Map> assignment = new TreeMap>(); + for (int i = 0; i < servers.size(); i++) { + int numRegions = regionAssignment[i]; + List regions = assignedRegions(numRegions, servers.get(i)); + assignment.put(servers.get(i), regions); + } + return assignment; + } + + /** + * Generate a list of regions evenly distributed between the tables. + * + * @param numRegions The number of regions to be generated. + * @return List of HRegionInfo. + */ + private List randomRegions(int numRegions) { + List regions = new ArrayList(numRegions); + byte[] start = new byte[16]; + byte[] end = new byte[16]; + rand.nextBytes(start); + rand.nextBytes(end); + int regionIdx = rand.nextInt(tables.length); + for (int i = 0; i < numRegions; i++) { + Bytes.putInt(start, 0, numRegions << 1); + Bytes.putInt(end, 0, (numRegions << 1) + 1); + int tableIndex = (i + regionIdx) % tables.length; + HRegionInfo hri = new HRegionInfo( + tables[tableIndex], start, end, false, regionId++); + regions.add(hri); + } + return regions; + } + + /** + * Generate assigned regions to a given server using group information. + * + * @param numRegions the num regions to generate + * @param sn the servername + * @return the list of regions + * @throws java.io.IOException Signals that an I/O exception has occurred. 
+ */ + private List assignedRegions(int numRegions, ServerName sn) throws IOException { + List regions = new ArrayList(numRegions); + byte[] start = new byte[16]; + byte[] end = new byte[16]; + Bytes.putInt(start, 0, numRegions << 1); + Bytes.putInt(end, 0, (numRegions << 1) + 1); + for (int i = 0; i < numRegions; i++) { + TableName tableName = getTableName(sn); + HRegionInfo hri = new HRegionInfo( + tableName, start, end, false, + regionId++); + regions.add(hri); + } + return regions; + } + + private static List generateServers(int numServers) { + List servers = new ArrayList(numServers); + for (int i = 0; i < numServers; i++) { + String host = "server" + rand.nextInt(100000); + int port = rand.nextInt(60000); + servers.add(ServerName.valueOf(host, port, -1)); + } + return servers; + } + + /** + * Construct group info, with each group having at least one server. + * + * @param servers the servers + * @param groups the groups + * @return the map + */ + private static Map constructGroupInfo( + List servers, String[] groups) { + assertTrue(servers != null); + assertTrue(servers.size() >= groups.length); + int index = 0; + Map groupMap = new HashMap(); + for (String grpName : groups) { + RSGroupInfo RSGroupInfo = new RSGroupInfo(grpName); + RSGroupInfo.addServer(servers.get(index).getAddress()); + groupMap.put(grpName, RSGroupInfo); + index++; + } + while (index < servers.size()) { + int grpIndex = rand.nextInt(groups.length); + groupMap.get(groups[grpIndex]).addServer(servers.get(index).getAddress()); + index++; + } + return groupMap; + } + + /** + * Construct table descriptors evenly distributed between the groups. + * + * @return the list + */ + private static List constructTableDesc() { + List tds = Lists.newArrayList(); + int index = rand.nextInt(groups.length); + for (int i = 0; i < tables.length; i++) { + HTableDescriptor htd = new HTableDescriptor(tables[i]); + int grpIndex = (i + index) % groups.length ; + String groupName = groups[grpIndex]; + tableMap.put(tables[i], groupName); + tds.add(htd); + } + return tds; + } + + private static MasterServices getMockedMaster() throws IOException { + TableDescriptors tds = Mockito.mock(TableDescriptors.class); + Mockito.when(tds.get(tables[0])).thenReturn(tableDescs.get(0)); + Mockito.when(tds.get(tables[1])).thenReturn(tableDescs.get(1)); + Mockito.when(tds.get(tables[2])).thenReturn(tableDescs.get(2)); + Mockito.when(tds.get(tables[3])).thenReturn(tableDescs.get(3)); + MasterServices services = Mockito.mock(HMaster.class); + Mockito.when(services.getTableDescriptors()).thenReturn(tds); + AssignmentManager am = Mockito.mock(AssignmentManager.class); + Mockito.when(services.getAssignmentManager()).thenReturn(am); + return services; + } + + private static RSGroupInfoManager getMockedGroupInfoManager() throws IOException { + RSGroupInfoManager gm = Mockito.mock(RSGroupInfoManager.class); + Mockito.when(gm.getRSGroup(groups[0])).thenReturn( + groupMap.get(groups[0])); + Mockito.when(gm.getRSGroup(groups[1])).thenReturn( + groupMap.get(groups[1])); + Mockito.when(gm.getRSGroup(groups[2])).thenReturn( + groupMap.get(groups[2])); + Mockito.when(gm.getRSGroup(groups[3])).thenReturn( + groupMap.get(groups[3])); + Mockito.when(gm.listRSGroups()).thenReturn( + Lists.newLinkedList(groupMap.values())); + Mockito.when(gm.isOnline()).thenReturn(true); + Mockito.when(gm.getRSGroupOfTable(Mockito.any(TableName.class))) + .thenAnswer(new Answer() { + @Override + public String answer(InvocationOnMock invocation) throws Throwable { + return 
tableMap.get(invocation.getArguments()[0]); + } + }); + return gm; + } + + private TableName getTableName(ServerName sn) throws IOException { + TableName tableName = null; + RSGroupInfoManager gm = getMockedGroupInfoManager(); + RSGroupInfo groupOfServer = null; + for(RSGroupInfo gInfo : gm.listRSGroups()){ + if(gInfo.containsServer(sn.getAddress())){ + groupOfServer = gInfo; + break; + } + } + + for(HTableDescriptor desc : tableDescs){ + if(gm.getRSGroupOfTable(desc.getTableName()).endsWith(groupOfServer.getName())){ + tableName = desc.getTableName(); + } + } + return tableName; + } +} diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java new file mode 100644 index 00000000000..3ad928fb28b --- /dev/null +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java @@ -0,0 +1,300 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rsgroup; + +import com.google.common.collect.Sets; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.Waiter.Predicate; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.io.IOException; +import java.util.Iterator; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MediumTests.class}) +public class TestRSGroups extends TestRSGroupsBase { + protected static final Log LOG = LogFactory.getLog(TestRSGroups.class); + private static HMaster master; + private static boolean init = false; + private static RSGroupAdminEndpoint RSGroupAdminEndpoint; + + + @BeforeClass + public static void setUp() throws Exception { + TEST_UTIL = new HBaseTestingUtility(); + TEST_UTIL.getConfiguration().setFloat( + "hbase.master.balancer.stochastic.tableSkewCost", 6000); + TEST_UTIL.getConfiguration().set( + HConstants.HBASE_MASTER_LOADBALANCER_CLASS, + RSGroupBasedLoadBalancer.class.getName()); + TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + RSGroupAdminEndpoint.class.getName()); + TEST_UTIL.getConfiguration().setBoolean( + HConstants.ZOOKEEPER_USEMULTI, + true); + TEST_UTIL.startMiniCluster(NUM_SLAVES_BASE); + TEST_UTIL.getConfiguration().set( + ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, + ""+NUM_SLAVES_BASE); + TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); + + admin = TEST_UTIL.getHBaseAdmin(); + cluster = TEST_UTIL.getHBaseCluster(); + master = ((MiniHBaseCluster)cluster).getMaster(); + + //wait for balancer to come online + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return master.isInitialized() && + ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline(); + } + }); + admin.setBalancerRunning(false,true); + rsGroupAdmin = new VerifyingRSGroupAdminClient(new RSGroupAdminClient(TEST_UTIL.getConnection()), + TEST_UTIL.getConfiguration()); + RSGroupAdminEndpoint = + master.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class).get(0); + } + + @AfterClass + public static void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void beforeMethod() throws Exception { + if(!init) { + init = true; + afterMethod(); + } + + } 
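+ + // A minimal usage sketch of the rsgroup admin API these tests exercise, assuming + // an open Connection; the group, server, and table names below are illustrative + // only and are not values used by the tests themselves: + // + // RSGroupAdmin groupAdmin = new RSGroupAdminClient(connection); + // groupAdmin.addRSGroup("app1"); // create an empty group + // groupAdmin.moveServers(Sets.newHashSet( + // Address.fromParts("rs1.example.com", 16020)), "app1"); // claim a server from default + // groupAdmin.moveTables(Sets.newHashSet( + // TableName.valueOf("app1_table")), "app1"); // pin a table to the group + // groupAdmin.balanceRSGroup("app1"); // balance regions within the group only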
+ + @After + public void afterMethod() throws Exception { + deleteTableIfNecessary(); + deleteNamespaceIfNecessary(); + deleteGroups(); + + int missing = NUM_SLAVES_BASE - getNumServers(); + LOG.info("Restoring servers: "+missing); + for(int i=0; i() { + @Override + public boolean evaluate() throws Exception { + LOG.info("Waiting for cleanup to finish " + rsGroupAdmin.listRSGroups()); + //Might be greater since moving servers back to default + //is after starting a server + + return rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size() + == NUM_SLAVES_BASE; + } + }); + } + + @Test + public void testBasicStartUp() throws IOException { + RSGroupInfo defaultInfo = rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP); + assertEquals(4, defaultInfo.getServers().size()); + // Assignment of root and meta regions. + int count = master.getAssignmentManager().getRegionStates().getRegionAssignments().size(); + //3 meta,namespace, group + assertEquals(3, count); + } + + @Test + public void testNamespaceCreateAndAssign() throws Exception { + LOG.info("testNamespaceCreateAndAssign"); + String nsName = tablePrefix+"_foo"; + final TableName tableName = TableName.valueOf(nsName, tablePrefix + "_testCreateAndAssign"); + RSGroupInfo appInfo = addGroup(rsGroupAdmin, "appInfo", 1); + admin.createNamespace(NamespaceDescriptor.create(nsName) + .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "appInfo").build()); + final HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor("f")); + admin.createTable(desc); + //wait for created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getTableRegionMap().get(desc.getTableName()) != null; + } + }); + ServerName targetServer = + ServerName.parseServerName(appInfo.getServers().iterator().next().toString()); + AdminProtos.AdminService.BlockingInterface rs = admin.getConnection().getAdmin(targetServer); + //verify it was assigned to the right group + Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(rs).size()); + } + + @Test + public void testDefaultNamespaceCreateAndAssign() throws Exception { + LOG.info("testDefaultNamespaceCreateAndAssign"); + final byte[] tableName = Bytes.toBytes(tablePrefix + "_testCreateAndAssign"); + admin.modifyNamespace(NamespaceDescriptor.create("default") + .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "default").build()); + final HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor("f")); + admin.createTable(desc); + //wait for created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getTableRegionMap().get(desc.getTableName()) != null; + } + }); + } + + @Test + public void testNamespaceConstraint() throws Exception { + String nsName = tablePrefix+"_foo"; + String groupName = tablePrefix+"_foo"; + LOG.info("testNamespaceConstraint"); + rsGroupAdmin.addRSGroup(groupName); + admin.createNamespace(NamespaceDescriptor.create(nsName) + .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, groupName) + .build()); + //test removing a referenced group + try { + rsGroupAdmin.removeRSGroup(groupName); + fail("Expected a constraint exception"); + } catch (IOException ex) { + } + //test modify group + //changing with the same name is fine + admin.modifyNamespace( + NamespaceDescriptor.create(nsName) + 
.addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, groupName) + .build()); + String anotherGroup = tablePrefix+"_anotherGroup"; + rsGroupAdmin.addRSGroup(anotherGroup); + //test add non-existent group + admin.deleteNamespace(nsName); + rsGroupAdmin.removeRSGroup(groupName); + try { + admin.createNamespace(NamespaceDescriptor.create(nsName) + .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, "foo") + .build()); + fail("Expected a constraint exception"); + } catch (IOException ex) { + } + } + + @Test + public void testGroupInfoMultiAccessing() throws Exception { + RSGroupInfoManager manager = RSGroupAdminEndpoint.getGroupInfoManager(); + final RSGroupInfo defaultGroup = manager.getRSGroup("default"); + // getRSGroup updates default group's server list + // this process must not affect other threads iterating the list + Iterator
it = defaultGroup.getServers().iterator(); + manager.getRSGroup("default"); + it.next(); + } + + @Test + public void testMisplacedRegions() throws Exception { + final TableName tableName = TableName.valueOf(tablePrefix+"_testMisplacedRegions"); + LOG.info("testMisplacedRegions"); + + final RSGroupInfo RSGroupInfo = addGroup(rsGroupAdmin, "testMisplacedRegions", 1); + + TEST_UTIL.createMultiRegionTable(tableName, new byte[]{'f'}, 15); + TEST_UTIL.waitUntilAllRegionsAssigned(tableName); + + RSGroupAdminEndpoint.getGroupInfoManager() + .moveTables(Sets.newHashSet(tableName), RSGroupInfo.getName()); + + assertTrue(rsGroupAdmin.balanceRSGroup(RSGroupInfo.getName())); + + TEST_UTIL.waitFor(60000, new Predicate() { + @Override + public boolean evaluate() throws Exception { + ServerName serverName = + ServerName.valueOf(RSGroupInfo.getServers().iterator().next().toString(), 1); + return admin.getConnection().getAdmin() + .getOnlineRegions(serverName).size() == 15; + } + }); + } + + @Test + public void testCloneSnapshot() throws Exception { + final TableName tableName = TableName.valueOf(tablePrefix+"_testCloneSnapshot"); + LOG.info("testCloneSnapshot"); + + byte[] FAMILY = Bytes.toBytes("test"); + String snapshotName = tableName.getNameAsString() + "_snap"; + TableName clonedTableName = TableName.valueOf(tableName.getNameAsString() + "_clone"); + + // create base table + TEST_UTIL.createTable(tableName, FAMILY); + + // create snapshot + admin.snapshot(snapshotName, tableName); + + // clone + admin.cloneSnapshot(snapshotName, clonedTableName); + } + +} diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java new file mode 100644 index 00000000000..0db0feac0c5 --- /dev/null +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java @@ -0,0 +1,815 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rsgroup; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.ClusterStatus; +import org.apache.hadoop.hbase.HBaseCluster; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.RegionLoad; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.constraint.ConstraintException; +import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.AdminProtos; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Assert; +import org.junit.Ignore; +import org.junit.Test; + +import java.io.IOException; +import java.security.SecureRandom; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public abstract class TestRSGroupsBase { + protected static final Log LOG = LogFactory.getLog(TestRSGroupsBase.class); + + //shared + protected final static String groupPrefix = "Group"; + protected final static String tablePrefix = "Group"; + protected final static SecureRandom rand = new SecureRandom(); + + //shared, cluster type specific + protected static HBaseTestingUtility TEST_UTIL; + protected static HBaseAdmin admin; + protected static HBaseCluster cluster; + protected static RSGroupAdmin rsGroupAdmin; + + public final static long WAIT_TIMEOUT = 60000*5; + public final static int NUM_SLAVES_BASE = 4; //number of slaves for the smallest cluster + + + + protected RSGroupInfo addGroup(RSGroupAdmin gAdmin, String groupName, + int serverCount) throws IOException, InterruptedException { + RSGroupInfo defaultInfo = gAdmin + .getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP); + assertTrue(defaultInfo != null); + assertTrue(defaultInfo.getServers().size() >= serverCount); + gAdmin.addRSGroup(groupName); + + Set
<Address> set = new HashSet<Address>
(); + for(Address server: defaultInfo.getServers()) { + if(set.size() == serverCount) { + break; + } + set.add(server); + } + gAdmin.moveServers(set, groupName); + RSGroupInfo result = gAdmin.getRSGroupInfo(groupName); + assertTrue(result.getServers().size() >= serverCount); + return result; + } + + static void removeGroup(RSGroupAdminClient groupAdmin, String groupName) throws IOException { + RSGroupInfo info = groupAdmin.getRSGroupInfo(groupName); + groupAdmin.moveTables(info.getTables(), RSGroupInfo.DEFAULT_GROUP); + groupAdmin.moveServers(info.getServers(), RSGroupInfo.DEFAULT_GROUP); + groupAdmin.removeRSGroup(groupName); + } + + protected void deleteTableIfNecessary() throws IOException { + for (HTableDescriptor desc : TEST_UTIL.getHBaseAdmin().listTables(tablePrefix+".*")) { + TEST_UTIL.deleteTable(desc.getTableName()); + } + } + + protected void deleteNamespaceIfNecessary() throws IOException { + for (NamespaceDescriptor desc : TEST_UTIL.getHBaseAdmin().listNamespaceDescriptors()) { + if(desc.getName().startsWith(tablePrefix)) { + admin.deleteNamespace(desc.getName()); + } + } + } + + protected void deleteGroups() throws IOException { + RSGroupAdmin groupAdmin = new RSGroupAdminClient(TEST_UTIL.getConnection()); + for(RSGroupInfo group: groupAdmin.listRSGroups()) { + if(!group.getName().equals(RSGroupInfo.DEFAULT_GROUP)) { + groupAdmin.moveTables(group.getTables(), RSGroupInfo.DEFAULT_GROUP); + groupAdmin.moveServers(group.getServers(), RSGroupInfo.DEFAULT_GROUP); + groupAdmin.removeRSGroup(group.getName()); + } + } + } + + public Map> getTableRegionMap() throws IOException { + Map> map = Maps.newTreeMap(); + Map>> tableServerRegionMap + = getTableServerRegionMap(); + for(TableName tableName : tableServerRegionMap.keySet()) { + if(!map.containsKey(tableName)) { + map.put(tableName, new LinkedList()); + } + for(List subset: tableServerRegionMap.get(tableName).values()) { + map.get(tableName).addAll(subset); + } + } + return map; + } + + public Map>> getTableServerRegionMap() + throws IOException { + Map>> map = Maps.newTreeMap(); + ClusterStatus status = TEST_UTIL.getHBaseClusterInterface().getClusterStatus(); + for(ServerName serverName : status.getServers()) { + for(RegionLoad rl : status.getLoad(serverName).getRegionsLoad().values()) { + TableName tableName = null; + try { + tableName = HRegionInfo.getTable(rl.getName()); + } catch (IllegalArgumentException e) { + LOG.warn("Failed parse a table name from regionname=" + + Bytes.toStringBinary(rl.getName())); + continue; + } + if(!map.containsKey(tableName)) { + map.put(tableName, new TreeMap>()); + } + if(!map.get(tableName).containsKey(serverName)) { + map.get(tableName).put(serverName, new LinkedList()); + } + map.get(tableName).get(serverName).add(rl.getNameAsString()); + } + } + return map; + } + + @Test + public void testBogusArgs() throws Exception { + assertNull(rsGroupAdmin.getRSGroupInfoOfTable(TableName.valueOf("nonexistent"))); + assertNull(rsGroupAdmin.getRSGroupOfServer(Address.fromParts("bogus",123))); + assertNull(rsGroupAdmin.getRSGroupInfo("bogus")); + + try { + rsGroupAdmin.removeRSGroup("bogus"); + fail("Expected removing bogus group to fail"); + } catch(ConstraintException ex) { + //expected + } + + try { + rsGroupAdmin.moveTables(Sets.newHashSet(TableName.valueOf("bogustable")), "bogus"); + fail("Expected move with bogus group to fail"); + } catch(ConstraintException ex) { + //expected + } + + try { + rsGroupAdmin.moveServers(Sets.newHashSet(Address.fromParts("bogus",123)), "bogus"); + fail("Expected move with 
bogus group to fail"); + } catch(ConstraintException ex) { + //expected + } + + try { + rsGroupAdmin.balanceRSGroup("bogus"); + fail("Expected move with bogus group to fail"); + } catch(ConstraintException ex) { + //expected + } + } + + @Test + public void testCreateMultiRegion() throws IOException { + LOG.info("testCreateMultiRegion"); + TableName tableName = TableName.valueOf(tablePrefix + "_testCreateMultiRegion"); + byte[] end = {1,3,5,7,9}; + byte[] start = {0,2,4,6,8}; + byte[][] f = {Bytes.toBytes("f")}; + TEST_UTIL.createTable(tableName, f,1,start,end,10); + } + + @Test + public void testCreateAndDrop() throws Exception { + LOG.info("testCreateAndDrop"); + + final TableName tableName = TableName.valueOf(tablePrefix + "_testCreateAndDrop"); + TEST_UTIL.createTable(tableName, Bytes.toBytes("cf")); + //wait for created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getTableRegionMap().get(tableName) != null; + } + }); + TEST_UTIL.deleteTable(tableName); + } + + @Test + public void testSimpleRegionServerMove() throws IOException, + InterruptedException { + LOG.info("testSimpleRegionServerMove"); + + int initNumGroups = rsGroupAdmin.listRSGroups().size(); + RSGroupInfo appInfo = addGroup(rsGroupAdmin, getGroupName("testSimpleRegionServerMove"), 1); + RSGroupInfo adminInfo = addGroup(rsGroupAdmin, getGroupName("testSimpleRegionServerMove"), 1); + RSGroupInfo dInfo = rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP); + Assert.assertEquals(initNumGroups + 2, rsGroupAdmin.listRSGroups().size()); + assertEquals(1, adminInfo.getServers().size()); + assertEquals(1, appInfo.getServers().size()); + assertEquals(getNumServers() - 2, dInfo.getServers().size()); + rsGroupAdmin.moveServers(appInfo.getServers(), + RSGroupInfo.DEFAULT_GROUP); + rsGroupAdmin.removeRSGroup(appInfo.getName()); + rsGroupAdmin.moveServers(adminInfo.getServers(), + RSGroupInfo.DEFAULT_GROUP); + rsGroupAdmin.removeRSGroup(adminInfo.getName()); + Assert.assertEquals(rsGroupAdmin.listRSGroups().size(), initNumGroups); + } + + // return the real number of region servers, excluding the master embedded region server in 2.0+ + public int getNumServers() throws IOException { + ClusterStatus status = admin.getClusterStatus(); + ServerName master = status.getMaster(); + int count = 0; + for (ServerName sn : status.getServers()) { + if (!sn.equals(master)) { + count++; + } + } + return count; + } + + @Test + public void testMoveServers() throws Exception { + LOG.info("testMoveServers"); + + //create groups and assign servers + addGroup(rsGroupAdmin, "bar", 3); + rsGroupAdmin.addRSGroup("foo"); + + RSGroupInfo barGroup = rsGroupAdmin.getRSGroupInfo("bar"); + RSGroupInfo fooGroup = rsGroupAdmin.getRSGroupInfo("foo"); + assertEquals(3, barGroup.getServers().size()); + assertEquals(0, fooGroup.getServers().size()); + + //test fail bogus server move + try { + rsGroupAdmin.moveServers(Sets.newHashSet(Address.fromString("foo:9999")),"foo"); + fail("Bogus servers shouldn't have been successfully moved."); + } catch(IOException ex) { + String exp = "Server foo:9999 does not have a group."; + String msg = "Expected '"+exp+"' in exception message: "; + assertTrue(msg+" "+ex.getMessage(), ex.getMessage().contains(exp)); + } + + //test success case + LOG.info("moving servers "+barGroup.getServers()+" to group foo"); + rsGroupAdmin.moveServers(barGroup.getServers(), fooGroup.getName()); + + barGroup = rsGroupAdmin.getRSGroupInfo("bar"); + 
fooGroup = rsGroupAdmin.getRSGroupInfo("foo"); + assertEquals(0,barGroup.getServers().size()); + assertEquals(3,fooGroup.getServers().size()); + + LOG.info("moving servers "+fooGroup.getServers()+" to group default"); + rsGroupAdmin.moveServers(fooGroup.getServers(), RSGroupInfo.DEFAULT_GROUP); + + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getNumServers() == + rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size(); + } + }); + + fooGroup = rsGroupAdmin.getRSGroupInfo("foo"); + assertEquals(0,fooGroup.getServers().size()); + + //test group removal + LOG.info("Remove group "+barGroup.getName()); + rsGroupAdmin.removeRSGroup(barGroup.getName()); + Assert.assertEquals(null, rsGroupAdmin.getRSGroupInfo(barGroup.getName())); + LOG.info("Remove group "+fooGroup.getName()); + rsGroupAdmin.removeRSGroup(fooGroup.getName()); + Assert.assertEquals(null, rsGroupAdmin.getRSGroupInfo(fooGroup.getName())); + } + + @Test + public void testTableMoveTruncateAndDrop() throws Exception { + LOG.info("testTableMove"); + + final TableName tableName = TableName.valueOf(tablePrefix + "_testTableMoveAndDrop"); + final byte[] familyNameBytes = Bytes.toBytes("f"); + String newGroupName = getGroupName("testTableMove"); + final RSGroupInfo newGroup = addGroup(rsGroupAdmin, newGroupName, 2); + + TEST_UTIL.createMultiRegionTable(tableName, familyNameBytes, 5); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + List regions = getTableRegionMap().get(tableName); + if (regions == null) + return false; + return getTableRegionMap().get(tableName).size() >= 5; + } + }); + + RSGroupInfo tableGrp = rsGroupAdmin.getRSGroupInfoOfTable(tableName); + assertTrue(tableGrp.getName().equals(RSGroupInfo.DEFAULT_GROUP)); + + //change table's group + LOG.info("Moving table "+tableName+" to "+newGroup.getName()); + rsGroupAdmin.moveTables(Sets.newHashSet(tableName), newGroup.getName()); + + //verify group change + Assert.assertEquals(newGroup.getName(), + rsGroupAdmin.getRSGroupInfoOfTable(tableName).getName()); + + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + Map> serverMap = getTableServerRegionMap().get(tableName); + int count = 0; + if (serverMap != null) { + for (ServerName rs : serverMap.keySet()) { + if (newGroup.containsServer(rs.getAddress())) { + count += serverMap.get(rs).size(); + } + } + } + return count == 5; + } + }); + + //test truncate + admin.disableTable(tableName); + admin.truncateTable(tableName, true); + Assert.assertEquals(1, rsGroupAdmin.getRSGroupInfo(newGroup.getName()).getTables().size()); + Assert.assertEquals(tableName, rsGroupAdmin.getRSGroupInfo( + newGroup.getName()).getTables().first()); + + //verify removed table is removed from group + TEST_UTIL.deleteTable(tableName); + Assert.assertEquals(0, rsGroupAdmin.getRSGroupInfo(newGroup.getName()).getTables().size()); + } + + @Test + public void testGroupBalance() throws Exception { + LOG.info("testGroupBalance"); + String newGroupName = getGroupName("testGroupBalance"); + final RSGroupInfo newGroup = addGroup(rsGroupAdmin, newGroupName, 3); + + final TableName tableName = TableName.valueOf(tablePrefix+"_ns", "testGroupBalance"); + admin.createNamespace( + NamespaceDescriptor.create(tableName.getNamespaceAsString()) + .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, newGroupName).build()); + final byte[] 
familyNameBytes = Bytes.toBytes("f"); + final HTableDescriptor desc = new HTableDescriptor(tableName); + desc.addFamily(new HColumnDescriptor("f")); + byte [] startKey = Bytes.toBytes("aaaaa"); + byte [] endKey = Bytes.toBytes("zzzzz"); + admin.createTable(desc, startKey, endKey, 6); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + List regions = getTableRegionMap().get(tableName); + if (regions == null) { + return false; + } + return regions.size() >= 6; + } + }); + + //make assignment uneven, move all regions to one server + Map> assignMap = + getTableServerRegionMap().get(tableName); + final ServerName first = assignMap.entrySet().iterator().next().getKey(); + for(HRegionInfo region: admin.getTableRegions(tableName)) { + if(!assignMap.get(first).contains(region)) { + admin.move(region.getEncodedNameAsBytes(), Bytes.toBytes(first.getServerName())); + } + } + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + Map> map = getTableServerRegionMap().get(tableName); + if (map == null) { + return true; + } + List regions = map.get(first); + if (regions == null) { + return true; + } + return regions.size() >= 6; + } + }); + + //balance the other group and make sure it doesn't affect the new group + rsGroupAdmin.balanceRSGroup(RSGroupInfo.DEFAULT_GROUP); + assertEquals(6, getTableServerRegionMap().get(tableName).get(first).size()); + + rsGroupAdmin.balanceRSGroup(newGroupName); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + for (List regions : getTableServerRegionMap().get(tableName).values()) { + if (2 != regions.size()) { + return false; + } + } + return true; + } + }); + } + + @Test + public void testRegionMove() throws Exception { + LOG.info("testRegionMove"); + + final RSGroupInfo newGroup = addGroup(rsGroupAdmin, getGroupName("testRegionMove"), 1); + final TableName tableName = TableName.valueOf(tablePrefix + rand.nextInt()); + final byte[] familyNameBytes = Bytes.toBytes("f"); + // All the regions created below will be assigned to the default group. + TEST_UTIL.createMultiRegionTable(tableName, familyNameBytes, 6); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + List regions = getTableRegionMap().get(tableName); + if (regions == null) + return false; + return getTableRegionMap().get(tableName).size() >= 6; + } + }); + + //get target region to move + Map> assignMap = + getTableServerRegionMap().get(tableName); + String targetRegion = null; + for(ServerName server : assignMap.keySet()) { + targetRegion = assignMap.get(server).size() > 0 ? 
assignMap.get(server).get(0) : null;
+      if(targetRegion != null) {
+        break;
+      }
+    }
+    //get server which is not a member of new group
+    ServerName targetServer = null;
+    for(ServerName server : admin.getClusterStatus().getServers()) {
+      if(!newGroup.containsServer(server.getAddress())) {
+        targetServer = server;
+        break;
+      }
+    }
+
+    final AdminProtos.AdminService.BlockingInterface targetRS =
+      admin.getConnection().getAdmin(targetServer);
+
+    //move target server to group
+    rsGroupAdmin.moveServers(Sets.newHashSet(targetServer.getAddress()),
+        newGroup.getName());
+    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+      @Override
+      public boolean evaluate() throws Exception {
+        return ProtobufUtil.getOnlineRegions(targetRS).size() <= 0;
+      }
+    });
+
+    // Let's move this region to the new group.
+    TEST_UTIL.getHBaseAdmin().move(
+        Bytes.toBytes(HRegionInfo.encodeRegionName(Bytes.toBytes(targetRegion))),
+        Bytes.toBytes(targetServer.getServerName()));
+    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+      @Override
+      public boolean evaluate() throws Exception {
+        return
+            getTableRegionMap().get(tableName) != null &&
+            getTableRegionMap().get(tableName).size() == 6 &&
+            admin.getClusterStatus().getRegionsInTransition().size() < 1;
+      }
+    });
+
+    //verify that targetServer didn't open it
+    assertFalse(ProtobufUtil.getOnlineRegions(targetRS).contains(targetRegion));
+  }
+
+  @Test
+  public void testFailRemoveGroup() throws IOException, InterruptedException {
+    LOG.info("testFailRemoveGroup");
+
+    int initNumGroups = rsGroupAdmin.listRSGroups().size();
+    addGroup(rsGroupAdmin, "bar", 3);
+    TableName tableName = TableName.valueOf(tablePrefix+"_my_table");
+    TEST_UTIL.createTable(tableName, Bytes.toBytes("f"));
+    rsGroupAdmin.moveTables(Sets.newHashSet(tableName), "bar");
+    RSGroupInfo barGroup = rsGroupAdmin.getRSGroupInfo("bar");
+    //group is not empty, therefore removing it should fail
+    try {
+      rsGroupAdmin.removeRSGroup(barGroup.getName());
+      fail("Expected remove group to fail");
+    } catch(IOException e) {
+    }
+    //group cannot lose all its servers, therefore the move should fail
+    try {
+      rsGroupAdmin.moveServers(barGroup.getServers(), RSGroupInfo.DEFAULT_GROUP);
+      fail("Expected move servers to fail");
+    } catch(IOException e) {
+    }
+
+    rsGroupAdmin.moveTables(barGroup.getTables(), RSGroupInfo.DEFAULT_GROUP);
+    try {
+      rsGroupAdmin.removeRSGroup(barGroup.getName());
+      fail("Expected remove group to fail");
+    } catch(IOException e) {
+    }
+
+    rsGroupAdmin.moveServers(barGroup.getServers(), RSGroupInfo.DEFAULT_GROUP);
+    rsGroupAdmin.removeRSGroup(barGroup.getName());
+
+    Assert.assertEquals(initNumGroups, rsGroupAdmin.listRSGroups().size());
+  }
+
+  @Test
+  public void testKillRS() throws Exception {
+    LOG.info("testKillRS");
+    RSGroupInfo appInfo = addGroup(rsGroupAdmin, "appInfo", 1);
+
+    final TableName tableName = TableName.valueOf(tablePrefix+"_ns", "_testKillRS");
+    admin.createNamespace(
+        NamespaceDescriptor.create(tableName.getNamespaceAsString())
+            .addConfiguration(RSGroupInfo.NAMESPACE_DESC_PROP_GROUP, appInfo.getName()).build());
+    final HTableDescriptor desc = new HTableDescriptor(tableName);
+    desc.addFamily(new HColumnDescriptor("f"));
+    admin.createTable(desc);
+    //wait for created table to be assigned
+    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+      @Override
+      public boolean evaluate() throws Exception {
+        return getTableRegionMap().get(desc.getTableName()) != null;
+      }
+    });
+
+    ServerName targetServer = ServerName.parseServerName(
+
appInfo.getServers().iterator().next().toString()); + AdminProtos.AdminService.BlockingInterface targetRS = + admin.getConnection().getAdmin(targetServer); + HRegionInfo targetRegion = ProtobufUtil.getOnlineRegions(targetRS).get(0); + Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(targetRS).size()); + + try { + //stopping may cause an exception + //due to the connection loss + targetRS.stopServer(null, + AdminProtos.StopServerRequest.newBuilder().setReason("Die").build()); + } catch(Exception e) { + } + assertFalse(cluster.getClusterStatus().getServers().contains(targetServer)); + + //wait for created table to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return cluster.getClusterStatus().getRegionsInTransition().size() == 0; + } + }); + Set
newServers = Sets.newHashSet(); + newServers.add( + rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().iterator().next()); + rsGroupAdmin.moveServers(newServers, appInfo.getName()); + + //Make sure all the table's regions get reassigned + //disabling the table guarantees no conflicting assign/unassign (ie SSH) happens + admin.disableTable(tableName); + admin.enableTable(tableName); + + //wait for region to be assigned + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return cluster.getClusterStatus().getRegionsInTransition().size() == 0; + } + }); + + targetServer = ServerName.parseServerName( + newServers.iterator().next().toString()); + targetRS = + admin.getConnection().getAdmin(targetServer); + Assert.assertEquals(1, ProtobufUtil.getOnlineRegions(targetRS).size()); + Assert.assertEquals(tableName, + ProtobufUtil.getOnlineRegions(targetRS).get(0).getTable()); + } + + @Test + public void testValidGroupNames() throws IOException { + String[] badNames = {"foo*","foo@","-"}; + String[] goodNames = {"foo_123"}; + + for(String entry: badNames) { + try { + rsGroupAdmin.addRSGroup(entry); + fail("Expected a constraint exception for: "+entry); + } catch(ConstraintException ex) { + //expected + } + } + + for(String entry: goodNames) { + rsGroupAdmin.addRSGroup(entry); + } + } + + private String getGroupName(String baseName) { + return groupPrefix+"_"+baseName+"_"+rand.nextInt(Integer.MAX_VALUE); + } + + @Test + public void testMultiTableMove() throws Exception { + LOG.info("testMultiTableMove"); + + final TableName tableNameA = TableName.valueOf(tablePrefix + "_testMultiTableMoveA"); + final TableName tableNameB = TableName.valueOf(tablePrefix + "_testMultiTableMoveB"); + final byte[] familyNameBytes = Bytes.toBytes("f"); + String newGroupName = getGroupName("testMultiTableMove"); + final RSGroupInfo newGroup = addGroup(rsGroupAdmin, newGroupName, 1); + + TEST_UTIL.createTable(tableNameA, familyNameBytes); + TEST_UTIL.createTable(tableNameB, familyNameBytes); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + List regionsA = getTableRegionMap().get(tableNameA); + if (regionsA == null) + return false; + List regionsB = getTableRegionMap().get(tableNameB); + if (regionsB == null) + return false; + + return getTableRegionMap().get(tableNameA).size() >= 1 + && getTableRegionMap().get(tableNameB).size() >= 1; + } + }); + + RSGroupInfo tableGrpA = rsGroupAdmin.getRSGroupInfoOfTable(tableNameA); + assertTrue(tableGrpA.getName().equals(RSGroupInfo.DEFAULT_GROUP)); + + RSGroupInfo tableGrpB = rsGroupAdmin.getRSGroupInfoOfTable(tableNameB); + assertTrue(tableGrpB.getName().equals(RSGroupInfo.DEFAULT_GROUP)); + //change table's group + LOG.info("Moving table [" + tableNameA + "," + tableNameB + "] to " + newGroup.getName()); + rsGroupAdmin.moveTables(Sets.newHashSet(tableNameA, tableNameB), newGroup.getName()); + + //verify group change + Assert.assertEquals(newGroup.getName(), + rsGroupAdmin.getRSGroupInfoOfTable(tableNameA).getName()); + + Assert.assertEquals(newGroup.getName(), + rsGroupAdmin.getRSGroupInfoOfTable(tableNameB).getName()); + + //verify tables' not exist in old group + Set DefaultTables = rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getTables(); + assertFalse(DefaultTables.contains(tableNameA)); + assertFalse(DefaultTables.contains(tableNameB)); + + //verify tables' exist in new group + Set newGroupTables = 
rsGroupAdmin.getRSGroupInfo(newGroupName).getTables(); + assertTrue(newGroupTables.contains(tableNameA)); + assertTrue(newGroupTables.contains(tableNameB)); + } + + @Test + public void testMoveServersAndTables() throws Exception { + final TableName tableName = TableName.valueOf(tablePrefix + "_testMoveServersAndTables"); + final RSGroupInfo newGroup = addGroup(rsGroupAdmin, getGroupName("testMoveServersAndTables"), 1); + + //create table + final byte[] familyNameBytes = Bytes.toBytes("f"); + TEST_UTIL.createMultiRegionTable(tableName, familyNameBytes, 5); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + List regions = getTableRegionMap().get(tableName); + if (regions == null) + return false; + return getTableRegionMap().get(tableName).size() >= 5; + } + }); + + //get server which is not a member of new group + ServerName targetServer = null; + for(ServerName server : admin.getClusterStatus().getServers()) { + if(!newGroup.containsServer(server.getAddress()) && + !rsGroupAdmin.getRSGroupInfo("master").containsServer(server.getAddress())) { + targetServer = server; + break; + } + } + + LOG.debug("Print group info : " + rsGroupAdmin.listRSGroups()); + int oldDefaultGroupServerSize = + rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size(); + int oldDefaultGroupTableSize = + rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getTables().size(); + + //test fail bogus server move + try { + rsGroupAdmin.moveServersAndTables(Sets.newHashSet(Address.fromString("foo:9999")), + Sets.newHashSet(tableName), newGroup.getName()); + fail("Bogus servers shouldn't have been successfully moved."); + } catch(IOException ex) { + } + + //test fail server move + try { + rsGroupAdmin.moveServersAndTables(Sets.newHashSet(targetServer.getAddress()), + Sets.newHashSet(tableName), RSGroupInfo.DEFAULT_GROUP); + fail("servers shouldn't have been successfully moved."); + } catch(IOException ex) { + } + + //verify default group info + Assert.assertEquals(oldDefaultGroupServerSize, + rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers().size()); + Assert.assertEquals(oldDefaultGroupTableSize, + rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getTables().size()); + + //verify new group info + Assert.assertEquals(1, + rsGroupAdmin.getRSGroupInfo(newGroup.getName()).getServers().size()); + Assert.assertEquals(0, + rsGroupAdmin.getRSGroupInfo(newGroup.getName()).getTables().size()); + + //get all region to move targetServer + List regionList = getTableRegionMap().get(tableName); + for(String region : regionList) { + // Lets move this region to the targetServer + admin.move(Bytes.toBytes(HRegionInfo.encodeRegionName(Bytes.toBytes(region))), + Bytes.toBytes(targetServer.getServerName())); + } + + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return getTableRegionMap().get(tableName) != null && + getTableRegionMap().get(tableName).size() == 5 && + getTableServerRegionMap().get(tableName).size() == 1 && + admin.getClusterStatus().getRegionsInTransition().size() < 1; + } + }); + + //verify that all region move to targetServer + Assert.assertNotNull(getTableServerRegionMap().get(tableName)); + Assert.assertNotNull(getTableServerRegionMap().get(tableName).get(targetServer)); + Assert.assertEquals(5, getTableServerRegionMap().get(tableName).get(targetServer).size()); + + //move targetServer and table to newGroup + LOG.info("moving server and table 
to newGroup"); + rsGroupAdmin.moveServersAndTables(Sets.newHashSet(targetServer.getAddress()), + Sets.newHashSet(tableName), newGroup.getName()); + + //verify group change + Assert.assertEquals(newGroup.getName(), + rsGroupAdmin.getRSGroupInfoOfTable(tableName).getName()); + + //verify servers' not exist in old group + Set
defaultServers = rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getServers(); + assertFalse(defaultServers.contains(targetServer.getAddress())); + + //verify servers' exist in new group + Set
newGroupServers = rsGroupAdmin.getRSGroupInfo(newGroup.getName()).getServers(); + assertTrue(newGroupServers.contains(targetServer.getAddress())); + + //verify tables' not exist in old group + Set defaultTables = rsGroupAdmin.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP).getTables(); + assertFalse(defaultTables.contains(tableName)); + + //verify tables' exist in new group + Set newGroupTables = rsGroupAdmin.getRSGroupInfo(newGroup.getName()).getTables(); + assertTrue(newGroupTables.contains(tableName)); + } +} diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java new file mode 100644 index 00000000000..360f9ef9f68 --- /dev/null +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsOfflineMode.java @@ -0,0 +1,187 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.rsgroup; + +import com.google.common.collect.Sets; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseCluster; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.ServerManager; +import org.apache.hadoop.hbase.regionserver.HRegionServer; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + + +//This tests that GroupBasedBalancer will use data in zk +//to do balancing during master startup +//This does not test retain assignment +@Category(MediumTests.class) +public class TestRSGroupsOfflineMode { + private static final org.apache.commons.logging.Log LOG = + LogFactory.getLog(TestRSGroupsOfflineMode.class); + private static HMaster master; + private static HBaseAdmin hbaseAdmin; + private static HBaseTestingUtility TEST_UTIL; + private static HBaseCluster cluster; + private static RSGroupAdminEndpoint RSGroupAdminEndpoint; + public final static long WAIT_TIMEOUT = 60000*5; + + @BeforeClass + public static void setUp() throws Exception { + TEST_UTIL = new HBaseTestingUtility(); + 
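+    // Enable the rsgroup feature: the group-aware balancer is installed as
+    // the master load balancer and RSGroupAdminEndpoint as a master
+    // coprocessor. On a deployed cluster the same two settings would live in
+    // hbase-site.xml (hbase.master.loadbalancer.class and
+    // hbase.coprocessor.master.classes).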
TEST_UTIL.getConfiguration().set( + HConstants.HBASE_MASTER_LOADBALANCER_CLASS, + RSGroupBasedLoadBalancer.class.getName()); + TEST_UTIL.getConfiguration().set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + RSGroupAdminEndpoint.class.getName()); + TEST_UTIL.getConfiguration().set( + ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, + "1"); + TEST_UTIL.startMiniCluster(2, 3); + cluster = TEST_UTIL.getHBaseCluster(); + master = ((MiniHBaseCluster)cluster).getMaster(); + master.balanceSwitch(false); + hbaseAdmin = TEST_UTIL.getHBaseAdmin(); + //wait till the balancer is in online mode + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return master.isInitialized() && + ((RSGroupBasedLoadBalancer) master.getLoadBalancer()).isOnline() && + master.getServerManager().getOnlineServersList().size() >= 3; + } + }); + RSGroupAdminEndpoint = + master.getMasterCoprocessorHost().findCoprocessors(RSGroupAdminEndpoint.class).get(0); + } + + @AfterClass + public static void tearDown() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testOffline() throws Exception, InterruptedException { + //table should be after group table name + //so it gets assigned later + final TableName failoverTable = TableName.valueOf("testOffline"); + TEST_UTIL.createTable(failoverTable, Bytes.toBytes("f")); + + RSGroupAdmin groupAdmin = new RSGroupAdminClient(TEST_UTIL.getConnection()); + + final HRegionServer killRS = ((MiniHBaseCluster)cluster).getRegionServer(0); + final HRegionServer groupRS = ((MiniHBaseCluster)cluster).getRegionServer(1); + final HRegionServer failoverRS = ((MiniHBaseCluster)cluster).getRegionServer(2); + + String newGroup = "my_group"; + groupAdmin.addRSGroup(newGroup); + if(master.getAssignmentManager().getRegionStates().getRegionAssignments() + .containsValue(failoverRS.getServerName())) { + for(HRegionInfo regionInfo: hbaseAdmin.getOnlineRegions(failoverRS.getServerName())) { + hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(), + Bytes.toBytes(failoverRS.getServerName().getServerName())); + } + LOG.info("Waiting for region unassignments on failover RS..."); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return master.getServerManager().getLoad(failoverRS.getServerName()) + .getRegionsLoad().size() > 0; + } + }); + } + + //move server to group and make sure all tables are assigned + groupAdmin.moveServers(Sets.newHashSet(groupRS.getServerName().getAddress()), newGroup); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return groupRS.getNumberOfOnlineRegions() < 1 && + master.getAssignmentManager().getRegionStates().getRegionsInTransition().size() < 1; + } + }); + //move table to group and wait + groupAdmin.moveTables(Sets.newHashSet(RSGroupInfoManager.RSGROUP_TABLE_NAME), newGroup); + LOG.info("Waiting for move table..."); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return groupRS.getNumberOfOnlineRegions() == 1; + } + }); + + groupRS.stop("die"); + //race condition here + TEST_UTIL.getHBaseCluster().getMaster().stopMaster(); + LOG.info("Waiting for offline mode..."); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return TEST_UTIL.getHBaseCluster().getMaster() != null && + 
TEST_UTIL.getHBaseCluster().getMaster().isActiveMaster() && + TEST_UTIL.getHBaseCluster().getMaster().isInitialized() && + TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getOnlineServers().size() + <= 3; + } + }); + + + RSGroupInfoManager groupMgr = RSGroupAdminEndpoint.getGroupInfoManager(); + //make sure balancer is in offline mode, since this is what we're testing + assertFalse(groupMgr.isOnline()); + //verify the group affiliation that's loaded from ZK instead of tables + assertEquals(newGroup, + groupMgr.getRSGroupOfTable(RSGroupInfoManager.RSGROUP_TABLE_NAME)); + assertEquals(RSGroupInfo.DEFAULT_GROUP, groupMgr.getRSGroupOfTable(failoverTable)); + + //kill final regionserver to see the failover happens for all tables + //except GROUP table since it's group does not have any online RS + killRS.stop("die"); + master = TEST_UTIL.getHBaseCluster().getMaster(); + LOG.info("Waiting for new table assignment..."); + TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate() { + @Override + public boolean evaluate() throws Exception { + return failoverRS.getOnlineRegions(failoverTable).size() >= 1; + } + }); + Assert.assertEquals(0, failoverRS.getOnlineRegions(RSGroupInfoManager.RSGROUP_TABLE_NAME).size()); + + //need this for minicluster to shutdown cleanly + master.stopMaster(); + } +} diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java new file mode 100644 index 00000000000..f5d02f0c647 --- /dev/null +++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java @@ -0,0 +1,155 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.rsgroup; + +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.net.Address; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos; +import org.apache.hadoop.hbase.zookeeper.ZKUtil; +import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; +import org.apache.zookeeper.KeeperException; +import org.junit.Assert; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class VerifyingRSGroupAdminClient implements RSGroupAdmin { + private Table table; + private ZooKeeperWatcher zkw; + private RSGroupAdmin wrapped; + + public VerifyingRSGroupAdminClient(RSGroupAdmin RSGroupAdmin, Configuration conf) + throws IOException { + wrapped = RSGroupAdmin; + table = ConnectionFactory.createConnection(conf).getTable(RSGroupInfoManager.RSGROUP_TABLE_NAME); + zkw = new ZooKeeperWatcher(conf, this.getClass().getSimpleName(), null); + } + + @Override + public void addRSGroup(String groupName) throws IOException { + wrapped.addRSGroup(groupName); + verify(); + } + + @Override + public RSGroupInfo getRSGroupInfo(String groupName) throws IOException { + return wrapped.getRSGroupInfo(groupName); + } + + @Override + public RSGroupInfo getRSGroupInfoOfTable(TableName tableName) throws IOException { + return wrapped.getRSGroupInfoOfTable(tableName); + } + + @Override + public void moveServers(Set
servers, String targetGroup) throws IOException { + wrapped.moveServers(servers, targetGroup); + verify(); + } + + @Override + public void moveTables(Set tables, String targetGroup) throws IOException { + wrapped.moveTables(tables, targetGroup); + verify(); + } + + @Override + public void removeRSGroup(String name) throws IOException { + wrapped.removeRSGroup(name); + verify(); + } + + @Override + public boolean balanceRSGroup(String name) throws IOException { + return wrapped.balanceRSGroup(name); + } + + @Override + public List listRSGroups() throws IOException { + return wrapped.listRSGroups(); + } + + @Override + public RSGroupInfo getRSGroupOfServer(Address server) throws IOException { + return wrapped.getRSGroupOfServer(server); + } + + public void verify() throws IOException { + Map groupMap = Maps.newHashMap(); + Set zList = Sets.newHashSet(); + + for (Result result : table.getScanner(new Scan())) { + RSGroupProtos.RSGroupInfo proto = + RSGroupProtos.RSGroupInfo.parseFrom( + result.getValue( + RSGroupInfoManager.META_FAMILY_BYTES, + RSGroupInfoManager.META_QUALIFIER_BYTES)); + groupMap.put(proto.getName(), RSGroupProtobufUtil.toGroupInfo(proto)); + } + Assert.assertEquals(Sets.newHashSet(groupMap.values()), + Sets.newHashSet(wrapped.listRSGroups())); + try { + String groupBasePath = ZKUtil.joinZNode(zkw.baseZNode, "rsgroup"); + for(String znode: ZKUtil.listChildrenNoWatch(zkw, groupBasePath)) { + byte[] data = ZKUtil.getData(zkw, ZKUtil.joinZNode(groupBasePath, znode)); + if(data.length > 0) { + ProtobufUtil.expectPBMagicPrefix(data); + ByteArrayInputStream bis = new ByteArrayInputStream( + data, ProtobufUtil.lengthOfPBMagic(), data.length); + zList.add(RSGroupProtobufUtil.toGroupInfo(RSGroupProtos.RSGroupInfo.parseFrom(bis))); + } + } + Assert.assertEquals(zList.size(), groupMap.size()); + for(RSGroupInfo RSGroupInfo : zList) { + Assert.assertTrue(groupMap.get(RSGroupInfo.getName()).equals(RSGroupInfo)); + } + } catch (KeeperException e) { + throw new IOException("ZK verification failed", e); + } catch (DeserializationException e) { + throw new IOException("ZK verification failed", e); + } catch (InterruptedException e) { + throw new IOException("ZK verification failed", e); + } + } + + @Override + public void moveServersAndTables(Set
servers, Set tables, + String targetGroup) throws IOException { + wrapped.moveServersAndTables(servers, tables, targetGroup); + verify(); + } + + @Override + public void close() throws IOException { + } +} diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon index 0ecc131b709..7467baa6c55 100644 --- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon +++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon @@ -395,6 +395,8 @@ AssignmentManager assignmentManager = master.getAssignmentManager(); } else if (tableName.equals(QuotaUtil.QUOTA_TABLE_NAME)){ description = "The hbase:quota table holds quota information about number" + " or size of requests in a given time frame."; + } else if (tableName.equals(TableName.valueOf("hbase:rsgroup"))){ + description = "The hbase:rsgroup table holds information about regionserver groups"; } <% description %> diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java index 42484e71c94..9f8b3c15906 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/LocalHBaseCluster.java @@ -147,6 +147,9 @@ public class LocalHBaseCluster { if (conf.getInt(HConstants.REGIONSERVER_INFO_PORT, 0) != -1) { conf.set(HConstants.REGIONSERVER_INFO_PORT, "0"); } + if (conf.getInt(HConstants.MASTER_INFO_PORT, 0) != -1) { + conf.set(HConstants.MASTER_INFO_PORT, "0"); + } this.masterClass = (Class) conf.getClass(HConstants.MASTER_IMPL, masterClass); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java index eab9f971189..9ad8453aea2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java @@ -33,12 +33,14 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; import java.io.IOException; import java.util.List; +import java.util.Set; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving @@ -601,4 +603,64 @@ public class BaseMasterAndRegionObserver extends BaseRegionObserver public void postSetNamespaceQuota(final ObserverContext ctx, final String namespace, final Quotas quotas) throws IOException { } + + @Override + public void postAddRSGroup(ObserverContext ctx, String name) + throws IOException { + } + + @Override + public void postBalanceRSGroup(ObserverContext ctx, + String groupName, boolean balancerRan) throws IOException { + } + + @Override + public void postMoveServers(ObserverContext ctx, Set
+ servers, String targetGroup) throws IOException { + } + + @Override + public void postMoveTables(ObserverContext ctx, Set + tables, String targetGroup) throws IOException { + } + + @Override + public void preMoveServersAndTables(ObserverContext ctx, + Set
servers, Set tables, String targetGroup) throws IOException { + } + + @Override + public void postMoveServersAndTables(ObserverContext ctx, + Set
servers, Set tables, String targetGroup) throws IOException { + } + + @Override + public void postRemoveRSGroup(ObserverContext ctx, String name) + throws IOException { + } + + @Override + public void preAddRSGroup(ObserverContext ctx, String name) + throws IOException { + } + + @Override + public void preBalanceRSGroup(ObserverContext ctx, String groupName) + throws IOException { + } + + @Override + public void preMoveServers(ObserverContext ctx, + Set
servers, String targetGroup) throws IOException { + } + + @Override + public void preMoveTables(ObserverContext ctx, + Set tables, String targetGroup) throws IOException { + } + + @Override + public void preRemoveRSGroup(ObserverContext ctx, String name) + throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java index 373e5d5a425..ca2bd53ac48 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java @@ -33,12 +33,14 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; import java.io.IOException; import java.util.List; +import java.util.Set; @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.CONFIG}) @InterfaceStability.Evolving @@ -600,4 +602,65 @@ public class BaseMasterObserver implements MasterObserver { public void postSetNamespaceQuota(final ObserverContext ctx, final String namespace, final Quotas quotas) throws IOException { } + + @Override + public void preMoveServers(ObserverContext ctx, Set
+ servers, String targetGroup) throws IOException { + } + + @Override + public void postMoveServers(ObserverContext ctx, Set
+ servers, String targetGroup) throws IOException { + } + + @Override + public void preMoveTables(ObserverContext ctx, Set + tables, String targetGroup) throws IOException { + } + + @Override + public void postMoveTables(ObserverContext ctx, + Set tables, String targetGroup) throws IOException { + } + + @Override + public void preMoveServersAndTables(ObserverContext ctx, + Set
servers, Set tables, String targetGroup) throws IOException { + } + + @Override + public void postMoveServersAndTables(ObserverContext ctx, + Set
servers, Set tables, String targetGroup) throws IOException { + } + + @Override + public void preAddRSGroup(ObserverContext ctx, String name) + throws IOException { + } + + @Override + public void postAddRSGroup(ObserverContext ctx, String name) + throws IOException { + } + + @Override + public void preRemoveRSGroup(ObserverContext ctx, String name) + throws IOException { + + } + + @Override + public void postRemoveRSGroup(ObserverContext ctx, String name) + throws IOException { + } + + @Override + public void preBalanceRSGroup(ObserverContext ctx, String groupName) + throws IOException { + } + + @Override + public void postBalanceRSGroup(ObserverContext ctx, + String groupName, boolean balancerRan) throws IOException { + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java index 4ec02f4487a..03d5123c241 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.coprocessor; import java.io.IOException; import java.util.List; +import java.util.Set; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -36,6 +37,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.master.RegionPlan; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; @@ -1065,4 +1067,115 @@ public interface MasterObserver extends Coprocessor { */ void postClearDeadServers(ObserverContext ctx) throws IOException; + /** + * Called before servers are moved to target region server group + * @param ctx the environment to interact with the framework and master + * @param servers set of servers to move + * @param targetGroup destination group + */ + void preMoveServers(final ObserverContext ctx, + Set
servers, String targetGroup) throws IOException; + + /** + * Called after servers are moved to target region server group + * @param ctx the environment to interact with the framework and master + * @param servers set of servers to move + * @param targetGroup name of group + * @throws IOException on failure + */ + void postMoveServers(final ObserverContext ctx, + Set
servers, String targetGroup) throws IOException; + + /** + * Called before tables are moved to target region server group + * @param ctx the environment to interact with the framework and master + * @param tables set of tables to move + * @param targetGroup name of group + * @throws IOException on failure + */ + void preMoveTables(final ObserverContext ctx, + Set tables, String targetGroup) throws IOException; + + /** + * Called after servers are moved to target region server group + * @param ctx the environment to interact with the framework and master + * @param tables set of tables to move + * @param targetGroup name of group + * @throws IOException on failure + */ + void postMoveTables(final ObserverContext ctx, + Set tables, String targetGroup) throws IOException; + + /** + * Called before servers are moved to target region server group + * @param ctx the environment to interact with the framework and master + * @param servers set of servers to move + * @param targetGroup destination group + * @throws IOException on failure + */ + void preMoveServersAndTables(final ObserverContext ctx, + Set
servers, Set tables, String targetGroup) throws IOException; + + /** + * Called after servers are moved to target region server group + * @param ctx the environment to interact with the framework and master + * @param servers set of servers to move + * @param targetGroup name of group + */ + void postMoveServersAndTables(final ObserverContext ctx, + Set
servers, Set tables, String targetGroup) throws IOException; + + /** + * Called before a new region server group is added + * @param ctx the environment to interact with the framework and master + * @param name group name + * @throws IOException on failure + */ + void preAddRSGroup(final ObserverContext ctx, + String name) throws IOException; + + /** + * Called after a new region server group is added + * @param ctx the environment to interact with the framework and master + * @param name group name + * @throws IOException on failure + */ + void postAddRSGroup(final ObserverContext ctx, + String name) throws IOException; + + /** + * Called before a region server group is removed + * @param ctx the environment to interact with the framework and master + * @param name group name + * @throws IOException on failure + */ + void preRemoveRSGroup(final ObserverContext ctx, + String name) throws IOException; + + /** + * Called after a region server group is removed + * @param ctx the environment to interact with the framework and master + * @param name group name + * @throws IOException on failure + */ + void postRemoveRSGroup(final ObserverContext ctx, + String name) throws IOException; + + /** + * Called before a region server group is removed + * @param ctx the environment to interact with the framework and master + * @param groupName group name + * @throws IOException on failure + */ + void preBalanceRSGroup(final ObserverContext ctx, + String groupName) throws IOException; + + /** + * Called after a region server group is removed + * @param ctx the environment to interact with the framework and master + * @param groupName group name + * @throws IOException on failure + */ + void postBalanceRSGroup(final ObserverContext ctx, + String groupName, boolean balancerRan) throws IOException; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java index 7c145dd73f2..809b980dc31 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java @@ -2226,7 +2226,7 @@ public class AssignmentManager extends ZooKeeperListener { } } LOG.info("Assigning " + region.getRegionNameAsString() + - " to " + plan.getDestination().toString()); + " to " + plan.getDestination()); // Transition RegionState to PENDING_OPEN currentState = regionStates.updateRegionState(region, State.PENDING_OPEN, plan.getDestination()); @@ -2954,6 +2954,8 @@ public class AssignmentManager extends ZooKeeperListener { throw new IOException("Unable to determine a plan to assign region(s)"); } + processBogusAssignments(bulkPlan); + assign(regions.size(), servers.size(), "retainAssignment=true", bulkPlan); } @@ -2983,6 +2985,8 @@ public class AssignmentManager extends ZooKeeperListener { throw new IOException("Unable to determine a plan to assign region(s)"); } + processBogusAssignments(bulkPlan); + processFavoredNodes(regions); assign(regions.size(), servers.size(), "round-robin=true", bulkPlan); } @@ -4665,6 +4669,16 @@ public class AssignmentManager extends ZooKeeperListener { return errorMsg; } + private void processBogusAssignments(Map> bulkPlan) { + if (bulkPlan.containsKey(LoadBalancer.BOGUS_SERVER_NAME)) { + // Found no plan for some regions, put those regions in RIT + for (HRegionInfo hri : bulkPlan.get(LoadBalancer.BOGUS_SERVER_NAME)) { + regionStates.updateRegionState(hri, State.FAILED_OPEN); + } + 
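+      // The bogus entry is dropped below so these unplaceable regions (their
+      // group has no usable servers) are never handed to a real server; they
+      // remain in RIT as FAILED_OPEN until they can be assigned within their
+      // group.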
bulkPlan.remove(LoadBalancer.BOGUS_SERVER_NAME); + } + } + /** * @return Instance of load balancer */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 8a3bbd68ceb..9b41bbf4b98 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -3217,4 +3217,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server { public SplitOrMergeTracker getSplitOrMergeTracker() { return splitOrMergeTracker; } + + @Override + public LoadBalancer getLoadBalancer() { + return balancer; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java index c581b08ab47..937b32ff827 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java @@ -52,6 +52,9 @@ import org.apache.hadoop.hbase.TableName; @InterfaceAudience.Private public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObserver { + //used to signal to the caller that the region(s) cannot be assigned + ServerName BOGUS_SERVER_NAME = ServerName.parseServerName("localhost,1,1"); + /** * Set the current cluster status. This allows a LoadBalancer to map host name to a server * @param st diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java index 9edc60e2e9d..78c79257890 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.master; import java.io.IOException; import java.util.List; +import java.util.Set; import org.apache.commons.lang.ClassUtils; import org.apache.commons.logging.Log; @@ -44,6 +45,7 @@ import org.apache.hadoop.hbase.coprocessor.MetricsCoprocessor; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; import org.apache.hadoop.hbase.metrics.MetricRegistry; +import org.apache.hadoop.hbase.net.Address; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; @@ -67,6 +69,7 @@ public class MasterCoprocessorHost implements MasterCoprocessorEnvironment { private final MasterServices masterServices; private final MetricRegistry metricRegistry; + private final boolean supportGroupCPs; public MasterEnvironment(final Class implClass, final Coprocessor impl, final int priority, final int seq, final Configuration conf, @@ -75,6 +78,8 @@ public class MasterCoprocessorHost this.masterServices = services; this.metricRegistry = MetricsCoprocessor.createRegistryForMasterCoprocessor(implClass.getName()); + supportGroupCPs = !useLegacyMethod(impl.getClass(), + "preBalanceRSGroup", ObserverContext.class, String.class); } @Override @@ -1212,6 +1217,161 @@ public class MasterCoprocessorHost }); } + public void preMoveServers(final Set
servers, final String targetGroup) + throws IOException { + execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() { + @Override + public void call(MasterObserver oserver, + ObserverContext ctx) throws IOException { + if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) { + oserver.preMoveServers(ctx, servers, targetGroup); + } + } + }); + } + + public void postMoveServers(final Set
servers, final String targetGroup)
+      throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver,
+          ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+        if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) {
+          oserver.postMoveServers(ctx, servers, targetGroup);
+        }
+      }
+    });
+  }
+
+  public void preMoveTables(final Set<TableName> tables, final String targetGroup)
+      throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver,
+          ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+        if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) {
+          oserver.preMoveTables(ctx, tables, targetGroup);
+        }
+      }
+    });
+  }
+
+  public void postMoveTables(final Set<TableName> tables, final String targetGroup)
+      throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver,
+          ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+        if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) {
+          oserver.postMoveTables(ctx, tables, targetGroup);
+        }
+      }
+    });
+  }
+
+  public void preMoveServersAndTables(final Set<Address> servers, final Set<TableName> tables,
+      final String targetGroup) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver,
+          ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+        if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) {
+          oserver.preMoveServersAndTables(ctx, servers, tables, targetGroup);
+        }
+      }
+    });
+  }
+
+  public void postMoveServersAndTables(final Set<Address> servers, final Set<TableName> tables,
+      final String targetGroup) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver,
+          ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+        if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) {
+          oserver.postMoveServersAndTables(ctx, servers, tables, targetGroup);
+        }
+      }
+    });
+  }
+
+  public void preAddRSGroup(final String name)
+      throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver,
+          ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+        if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) {
+          oserver.preAddRSGroup(ctx, name);
+        }
+      }
+    });
+  }
+
+  public void postAddRSGroup(final String name)
+      throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver,
+          ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+        if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) {
+          oserver.postAddRSGroup(ctx, name);
+        }
+      }
+    });
+  }
+
+  public void preRemoveRSGroup(final String name)
+      throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver,
+          ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+        if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) {
+          oserver.preRemoveRSGroup(ctx, name);
+        }
+      }
+    });
+  }
+
+  public void postRemoveRSGroup(final String name)
+      throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver,
+          ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+        if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) {
+          oserver.postRemoveRSGroup(ctx, name);
+        }
+      }
+    });
+  }
+
+  public void preBalanceRSGroup(final String name)
+      throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver,
+          ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+        if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) {
+          oserver.preBalanceRSGroup(ctx, name);
+        }
+      }
+    });
+  }
+
+  public void postBalanceRSGroup(final String name, final boolean balanceRan)
+      throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver,
+          ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+        if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) {
+          oserver.postBalanceRSGroup(ctx, name, balanceRan);
+        }
+      }
+    });
+  }

   private static abstract class CoprocessorOperation
       extends ObserverContext<MasterCoprocessorEnvironment> {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 04033160743..7d58070a6eb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -411,4 +411,9 @@ public interface MasterServices extends Server {
   public String getRegionServerVersion(final ServerName sn);

   public void checkIfShouldMoveSystemRegionAsync();
+
+  /**
+   * @return the load balancer in use on the active master
+   */
+  public LoadBalancer getLoadBalancer();
 }
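Reviewer note: the hooks above follow the same execOperation/observer pattern as the rest of
MasterCoprocessorHost, and they only fire for observers whose environment reports
supportGroupCPs. A minimal sketch of a third-party observer consuming the new hooks might
look like the following; the AuditingRSGroupObserver class name and log messages are
illustrative only, not part of this patch:

import java.io.IOException;
import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.net.Address;

/** Logs every rsgroup membership change. Illustrative sketch only. */
public class AuditingRSGroupObserver extends BaseMasterObserver {
  private static final Log LOG = LogFactory.getLog(AuditingRSGroupObserver.class);

  @Override
  public void postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
      Set<Address> servers, String targetGroup) throws IOException {
    LOG.info("Moved servers " + servers + " to rsgroup " + targetGroup);
  }

  @Override
  public void postMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
      Set<TableName> tables, String targetGroup) throws IOException {
    LOG.info("Moved tables " + tables + " to rsgroup " + targetGroup);
  }
}

Such an observer would be registered through hbase.coprocessor.master.classes, the same key
TestShellRSGroups uses below to install RSGroupAdminEndpoint.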
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index e7277530292..550b98e6934 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -85,6 +85,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.ResponseConverter;
@@ -2705,4 +2706,40 @@ public class AccessController extends BaseMasterAndRegionObserver
       final String namespace, final Quotas quotas) throws IOException {
     requirePermission("setNamespaceQuota", Action.ADMIN);
   }
+
+  @Override
+  public void preMoveServersAndTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      Set<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {
+    requirePermission("moveServersAndTables", Action.ADMIN);
+  }
+
+  @Override
+  public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      Set<Address> servers, String targetGroup) throws IOException {
+    requirePermission("moveServers", Action.ADMIN);
+  }
+
+  @Override
+  public void preMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      Set<TableName> tables, String targetGroup) throws IOException {
+    requirePermission("moveTables", Action.ADMIN);
+  }
+
+  @Override
+  public void preAddRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String name) throws IOException {
+    requirePermission("addRSGroup", Action.ADMIN);
+  }
+
+  @Override
+  public void preRemoveRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String name) throws IOException {
+    requirePermission("removeRSGroup", Action.ADMIN);
+  }
+
+  @Override
+  public void preBalanceRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String groupName) throws IOException {
+    requirePermission("balanceRSGroup", Action.ADMIN);
+  }
 }
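Reviewer note: each pre-hook above is a straight requirePermission(..., Action.ADMIN) check,
so every rsgroup mutation is limited to global administrators. A hedged sketch of the
client-side effect; the helper class is hypothetical, while RSGroupAdminClient and
AccessDeniedException are real classes from this patch and hbase-client:

import java.io.IOException;

import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdminClient;
import org.apache.hadoop.hbase.security.AccessDeniedException;

final class RSGroupPermissionSketch {
  // Succeeds only for users holding global ADMIN; everyone else is rejected
  // by AccessController.preAddRSGroup() before the group is created.
  static void tryAddGroup(Connection conn, String group) throws IOException {
    try {
      new RSGroupAdminClient(conn).addRSGroup(group);
    } catch (AccessDeniedException e) {
      // expected for non-admin callers, mirroring testAddGroup() below
    }
  }
}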
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index 452b2a27eb5..b20d7bc3fe0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.net.Address;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
@@ -1278,6 +1279,66 @@ public class TestMasterObserver {
     public void postSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
         final String namespace, final Quotas quotas) throws IOException {
     }
+
+    @Override
+    public void preMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+        Set<Address> servers, String targetGroup) throws IOException {
+    }
+
+    @Override
+    public void postMoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+        Set<Address> servers, String targetGroup) throws IOException {
+    }
+
+    @Override
+    public void preMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+        Set<TableName> tables, String targetGroup) throws IOException {
+    }
+
+    @Override
+    public void postMoveTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+        Set<TableName> tables, String targetGroup) throws IOException {
+    }
+
+    @Override
+    public void preMoveServersAndTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+        Set<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {
+    }
+
+    @Override
+    public void postMoveServersAndTables(ObserverContext<MasterCoprocessorEnvironment> ctx,
+        Set<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {
+    }
+
+    @Override
+    public void preAddRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
+        throws IOException {
+    }
+
+    @Override
+    public void postAddRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
+        throws IOException {
+    }
+
+    @Override
+    public void preRemoveRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
+        throws IOException {
+    }
+
+    @Override
+    public void postRemoveRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
+        throws IOException {
+    }
+
+    @Override
+    public void preBalanceRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+        String groupName) throws IOException {
+    }
+
+    @Override
+    public void postBalanceRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
+        String groupName, boolean balancerRan) throws IOException {
+    }
   }

   private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index f955ac0c2e4..1b12cf0dd5c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -302,4 +302,9 @@ public class MockNoopMasterServices implements MasterServices, Server {
   public boolean isStopped() {
     return false;
   }
+
+  @Override
+  public LoadBalancer getLoadBalancer() {
+    return null;
+  }
 }
\ No newline at end of file
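Reviewer note: the two new tests added below rely on a convention this patch introduces for
bulk plans: when the balancer cannot place a region (for rsgroups, when the region's group
has no live servers), it keys the region under LoadBalancer.BOGUS_SERVER_NAME instead of
returning a partial plan, and the AssignmentManager then marks the region FAILED_OPEN with no
server. A small sketch of that contract; the helper class and method names are hypothetical:

import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.LoadBalancer;

final class BulkPlanSketch {
  // Regions parked on the bogus server name could not be placed; callers
  // should expect them to end up FAILED_OPEN with a null server, as the
  // tests below assert.
  static List<HRegionInfo> unplaced(Map<ServerName, List<HRegionInfo>> plan) {
    List<HRegionInfo> bogus = plan.get(LoadBalancer.BOGUS_SERVER_NAME);
    return bogus == null ? Collections.<HRegionInfo>emptyList() : bogus;
  }
}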
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
index 4843155533b..78b23c074e0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
@@ -27,12 +27,16 @@ import static org.junit.Assert.fail;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;

+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -610,7 +614,7 @@ public class TestAssignmentManagerOnCluster {
       desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
     MetaTableAccessor.addRegionToMeta(meta, hri);

-    MyLoadBalancer.controledRegion = hri.getEncodedName();
+    MyLoadBalancer.controledRegion = hri;

     HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
     AssignmentManager am = master.getAssignmentManager();
@@ -633,6 +637,105 @@ public class TestAssignmentManagerOnCluster {
     }
   }

+  /**
+   * This tests that round-robin assignment fails cleanly when the balancer
+   * returns no bulk plan for the region.
+   */
+  @Test (timeout=60000)
+  public void testRoundRobinAssignmentFailed() throws Exception {
+    TableName tableName = TableName.valueOf("testRoundRobinAssignmentFailed");
+    try {
+      HTableDescriptor desc = new HTableDescriptor(tableName);
+      desc.addFamily(new HColumnDescriptor(FAMILY));
+      admin.createTable(desc);
+
+      Table meta = admin.getConnection().getTable(TableName.META_TABLE_NAME);
+      HRegionInfo hri = new HRegionInfo(
+        desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
+      MetaTableAccessor.addRegionToMeta(meta, hri);
+
+      MyLoadBalancer.controledRegion = hri;
+
+      HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+      AssignmentManager am = master.getAssignmentManager();
+      // round-robin assignment, but the balancer cannot find a plan,
+      // so the assignment should fail
+      am.assign(Arrays.asList(hri));
+
+      // if bulk assignment cannot update the region state to ONLINE
+      // or FAILED_OPEN, this waits until timeout
+      assertFalse(am.waitForAssignment(hri));
+      RegionState state = am.getRegionStates().getRegionState(hri);
+      assertEquals(RegionState.State.FAILED_OPEN, state.getState());
+      // Failed to open since there was no plan, so it's on no server
+      assertNull(state.getServerName());
+
+      // try again with a valid plan
+      MyLoadBalancer.controledRegion = null;
+      am.assign(Arrays.asList(hri));
+      assertTrue(am.waitForAssignment(hri));
+
+      ServerName serverName = master.getAssignmentManager().
+        getRegionStates().getRegionServerOfRegion(hri);
+      TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
+    } finally {
+      MyLoadBalancer.controledRegion = null;
+      TEST_UTIL.deleteTable(tableName);
+    }
+  }
+
+  /**
+   * This tests that retain assignment fails cleanly when the balancer
+   * returns no bulk plan for the region.
+   */
+  @Test (timeout=60000)
+  public void testRetainAssignmentFailed() throws Exception {
+    TableName tableName = TableName.valueOf("testRetainAssignmentFailed");
+    try {
+      HTableDescriptor desc = new HTableDescriptor(tableName);
+      desc.addFamily(new HColumnDescriptor(FAMILY));
+      admin.createTable(desc);
+
+      Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
+      HRegionInfo hri = new HRegionInfo(
+        desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
+      MetaTableAccessor.addRegionToMeta(meta, hri);
+
+      MyLoadBalancer.controledRegion = hri;
+
+      HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+      AssignmentManager am = master.getAssignmentManager();
+
+      Map<HRegionInfo, ServerName> regions = new HashMap<HRegionInfo, ServerName>();
+      ServerName dest = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName();
+      regions.put(hri, dest);
+      // retainAssignment, but the balancer cannot find a plan,
+      // so the assignment should fail
+      am.assign(regions);
+
+      // if retain assignment cannot update the region state to ONLINE
+      // or FAILED_OPEN, this waits until timeout
+      assertFalse(am.waitForAssignment(hri));
+      RegionState state = am.getRegionStates().getRegionState(hri);
+      assertEquals(RegionState.State.FAILED_OPEN, state.getState());
+      // Failed to open since there was no plan, so it's on no server
+      assertNull(state.getServerName());
+
+      // try retainAssignment again with a valid plan
+      MyLoadBalancer.controledRegion = null;
+      am.assign(regions);
+      assertTrue(am.waitForAssignment(hri));
+
+      ServerName serverName = master.getAssignmentManager().
+        getRegionStates().getRegionServerOfRegion(hri);
+      TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
+
+      // it is retained on the same server as specified
+      assertEquals(serverName, dest);
+    } finally {
+      MyLoadBalancer.controledRegion = null;
+      TEST_UTIL.deleteTable(tableName);
+    }
+  }
+
   /**
    * This tests region open failure which is not recoverable
    */
@@ -1264,7 +1367,7 @@ public class TestAssignmentManagerOnCluster {

   static class MyLoadBalancer extends StochasticLoadBalancer {
     // For this region, if specified, always assign to nowhere
-    static volatile String controledRegion = null;
+    static volatile HRegionInfo controledRegion = null;

     static volatile Integer countRegionServers = null;
     static AtomicInteger counter = new AtomicInteger(0);
@@ -1272,7 +1375,7 @@ public class TestAssignmentManagerOnCluster {
     @Override
     public ServerName randomAssignment(HRegionInfo regionInfo,
         List<ServerName> servers) {
-      if (regionInfo.getEncodedName().equals(controledRegion)) {
+      if (regionInfo.equals(controledRegion)) {
         return null;
       }
       return super.randomAssignment(regionInfo, servers);
@@ -1290,8 +1393,26 @@ public class TestAssignmentManagerOnCluster {
           return null;
         }
       }
+      if (regions.get(0).equals(controledRegion)) {
+        Map<ServerName, List<HRegionInfo>> m = Maps.newHashMap();
+        m.put(LoadBalancer.BOGUS_SERVER_NAME, regions);
+        return m;
+      }
       return super.roundRobinAssignment(regions, servers);
     }
+
+    @Override
+    public Map<ServerName, List<HRegionInfo>> retainAssignment(
+        Map<HRegionInfo, ServerName> regions, List<ServerName> servers) {
+      for (HRegionInfo hri : regions.keySet()) {
+        if (hri.equals(controledRegion)) {
+          Map<ServerName, List<HRegionInfo>> m = Maps.newHashMap();
+          m.put(LoadBalancer.BOGUS_SERVER_NAME, Lists.newArrayList(regions.keySet()));
+          return m;
+        }
+      }
+      return super.retainAssignment(regions, servers);
+    }
   }

   public static class MyMaster extends HMaster {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 84e20816315..5bcc8d4b3f0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -491,6 +491,9 @@ public class TestCatalogJanitor {
         final long nonce) throws IOException {
       return -1;
     }
+    public LoadBalancer getLoadBalancer() {
+      return null;
+    }

     @Override
     public long disableTable(
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java
index 5e9b41c0059..94b6531e8b9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java
@@ -129,7 +129,7 @@ public class TestMasterStatusServlet {
     setupMockTables();

     new MasterStatusTmpl()
-      .setMetaLocation(ServerName.valueOf("metaserver:123,12345"))
+      .setMetaLocation(ServerName.valueOf("metaserver,123,12345"))
       .render(new StringWriter(), master);
   }

@@ -138,16 +138,16 @@ public class TestMasterStatusServlet {
     setupMockTables();

     List<ServerName> servers = Lists.newArrayList(
-      ServerName.valueOf("rootserver:123,12345"),
-      ServerName.valueOf("metaserver:123,12345"));
+      ServerName.valueOf("rootserver,123,12345"),
+      ServerName.valueOf("metaserver,123,12345"));
     Set<ServerName> deadServers = new HashSet<ServerName>(
       Lists.newArrayList(
-        ServerName.valueOf("badserver:123,12345"),
-        ServerName.valueOf("uglyserver:123,12345"))
+        ServerName.valueOf("badserver,123,12345"),
+
ServerName.valueOf("uglyserver,123,12345")) ); new MasterStatusTmpl() - .setMetaLocation(ServerName.valueOf("metaserver:123,12345")) + .setMetaLocation(ServerName.valueOf("metaserver,123,12345")) .setServers(servers) .setDeadServers(deadServers) .render(new StringWriter(), master); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java index fad2d33e636..81ecdcb1e4b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java @@ -269,7 +269,7 @@ public class TestSimpleRegionNormalizer { masterRpcServices = Mockito.mock(MasterRpcServices.class, RETURNS_DEEP_STUBS); // for simplicity all regions are assumed to be on one server; doesn't matter to us - ServerName sn = ServerName.valueOf("localhost", -1, 1L); + ServerName sn = ServerName.valueOf("localhost", 0, 1L); when(masterServices.getAssignmentManager().getRegionStates(). getRegionsOfTable(any(TableName.class))).thenReturn(hris); when(masterServices.getAssignmentManager().getRegionStates(). diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java index eff0e687fe8..5aedbf86474 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java @@ -2965,4 +2965,79 @@ public class TestAccessController extends SecureTestUtil { TEST_UTIL.deleteTable(tname); } } + + @Test + public void testMoveServers() throws Exception { + AccessTestAction action1 = new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preMoveServers(ObserverContext.createAndPrepare(CP_ENV, null), + null, null); + return null; + } + }; + + verifyAllowed(action1, SUPERUSER, USER_ADMIN); + verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER); + } + + @Test + public void testMoveTables() throws Exception { + AccessTestAction action1 = new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preMoveTables(ObserverContext.createAndPrepare(CP_ENV, null), + null, null); + return null; + } + }; + + verifyAllowed(action1, SUPERUSER, USER_ADMIN); + verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER); + } + + @Test + public void testAddGroup() throws Exception { + AccessTestAction action1 = new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preAddRSGroup(ObserverContext.createAndPrepare(CP_ENV, null), + null); + return null; + } + }; + + verifyAllowed(action1, SUPERUSER, USER_ADMIN); + verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER); + } + + @Test + public void testRemoveGroup() throws Exception { + AccessTestAction action1 = new AccessTestAction() { + @Override + public Object run() throws Exception { + ACCESS_CONTROLLER.preRemoveRSGroup(ObserverContext.createAndPrepare(CP_ENV, null), + null); + return null; + } + }; + + verifyAllowed(action1, SUPERUSER, USER_ADMIN); + verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER); + } + + @Test + public void testBalanceGroup() 
throws Exception {
+    AccessTestAction action1 = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preBalanceRSGroup(ObserverContext.createAndPrepare(CP_ENV, null),
+          null);
+        return null;
+      }
+    };
+
+    verifyAllowed(action1, SUPERUSER, USER_ADMIN);
+    verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+  }
 }
diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml
index a2a1d0c5a08..44b6095b0b7 100644
--- a/hbase-shell/pom.xml
+++ b/hbase-shell/pom.xml
@@ -254,6 +254,41 @@
+    <profile>
+      <id>rsgroup</id>
+      <activation>
+        <property>
+          <name>!skip-rsgroup</name>
+        </property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hbase</groupId>
+          <artifactId>hbase-rsgroup</artifactId>
+        </dependency>
+      </dependencies>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>build-helper-maven-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>add-test-source</id>
+                <goals>
+                  <goal>add-test-source</goal>
+                </goals>
+                <configuration>
+                  <sources>
+                    <source>src/test/rsgroup</source>
+                  </sources>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
     <profile>
       <id>skipShellTests</id>
diff --git a/hbase-shell/src/main/ruby/hbase.rb b/hbase-shell/src/main/ruby/hbase.rb
index 0fa1649e9e1..13794372ef3 100644
--- a/hbase-shell/src/main/ruby/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase.rb
@@ -107,6 +107,7 @@ require 'hbase/quotas'
 require 'hbase/replication_admin'
 require 'hbase/security'
 require 'hbase/visibility_labels'
+require 'hbase/rsgroup_admin'

 include HBaseQuotasConstants
diff --git a/hbase-shell/src/main/ruby/hbase/hbase.rb b/hbase-shell/src/main/ruby/hbase/hbase.rb
index e0243ceaa97..52349753ac2 100644
--- a/hbase-shell/src/main/ruby/hbase/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase/hbase.rb
@@ -54,6 +54,10 @@ module Hbase
       ::Hbase::TaskMonitor.new(configuration)
     end

+    def rsgroup_admin()
+      ::Hbase::RSGroupAdmin.new(@connection)
+    end
+
     # Create new one each time
     def table(table, shell)
       ::Hbase::Table.new(@connection.getTable(table), shell)
diff --git a/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb b/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb
new file mode 100644
index 00000000000..0b72fd8ff56
--- /dev/null
+++ b/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb
@@ -0,0 +1,164 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include Java
+java_import org.apache.hadoop.hbase.util.Pair
+
+# Wrapper for org.apache.hadoop.hbase.rsgroup.RSGroupAdminClient,
+# the API used to manage region server groups
+module Hbase
+  class RSGroupAdmin
+    include HBaseConstants
+
+    def initialize(connection)
+      @connection = connection
+      @admin = org.apache.hadoop.hbase.rsgroup.RSGroupAdminClient.new(connection)
+    end
+
+    def close
+      @admin.close
+    end
+
+    #--------------------------------------------------------------------------
+    # returns a list of groups in HBase
+    def list_rs_groups
+      @admin.listRSGroups.map { |g| g.getName }
+    end
+
+    #--------------------------------------------------------------------------
+    # get a group's information
+    def get_rsgroup(group_name)
+      group = @admin.getRSGroupInfo(group_name)
+      if group.nil?
+        raise(ArgumentError, 'Group does not exist: ' + group_name)
+      end
+
+      res = {}
+      if block_given?
+        yield('Servers:')
+      end
+
+      servers = []
+      group.getServers.each do |v|
+        if block_given?
+          yield(v.toString)
+        else
+          servers << v.toString
+        end
+      end
+      res[:servers] = servers
+
+      tables = []
+      if block_given?
+        yield('Tables:')
+      end
+      group.getTables.each do |v|
+        if block_given?
+          yield(v.toString)
+        else
+          tables << v.toString
+        end
+      end
+      res[:tables] = tables
+
+      if !block_given?
+        res
+      else
+        nil
+      end
+    end
+
+    #--------------------------------------------------------------------------
+    # add a group
+    def add_rs_group(group_name)
+      @admin.addRSGroup(group_name)
+    end
+
+    #--------------------------------------------------------------------------
+    # remove a group
+    def remove_rs_group(group_name)
+      @admin.removeRSGroup(group_name)
+    end
+
+    #--------------------------------------------------------------------------
+    # balance a group
+    def balance_rs_group(group_name)
+      @admin.balanceRSGroup(group_name)
+    end
+
+    #--------------------------------------------------------------------------
+    # move servers to a group
+    def move_servers(dest, *args)
+      servers = java.util.HashSet.new
+      args[0].each do |s|
+        servers.add(org.apache.hadoop.hbase.net.Address.fromString(s))
+      end
+      @admin.moveServers(servers, dest)
+    end
+
+    #--------------------------------------------------------------------------
+    # move tables to a group
+    def move_tables(dest, *args)
+      tables = java.util.HashSet.new
+      args[0].each do |s|
+        tables.add(org.apache.hadoop.hbase.TableName.valueOf(s))
+      end
+      @admin.moveTables(tables, dest)
+    end
+
+    #--------------------------------------------------------------------------
+    # get the group of a server
+    def get_rsgroup_of_server(server)
+      res = @admin.getRSGroupOfServer(
+        org.apache.hadoop.hbase.net.Address.fromString(server))
+      if res.nil?
+        raise(ArgumentError, 'Server has no group: ' + server)
+      end
+      res
+    end
+
+    #--------------------------------------------------------------------------
+    # get the group of a table
+    def get_rsgroup_of_table(table)
+      res = @admin.getRSGroupInfoOfTable(
+        org.apache.hadoop.hbase.TableName.valueOf(table))
+      if res.nil?
+        raise(ArgumentError, 'Table has no group: ' + table)
+      end
+      res
+    end
+
+    #--------------------------------------------------------------------------
+    # move servers and tables to a group
+    def move_servers_tables(dest, *args)
+      servers = java.util.HashSet.new
+      tables = java.util.HashSet.new
+      args[0].each do |s|
+        servers.add(org.apache.hadoop.hbase.net.Address.fromString(s))
+      end
+      args[1].each do |t|
+        tables.add(org.apache.hadoop.hbase.TableName.valueOf(t))
+      end
+      @admin.moveServersAndTables(servers, tables, dest)
+    end
+
+  end
+end
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index 433ed10dc56..2eb872c3847 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -109,6 +109,10 @@ module Shell
       @hbase_quotas_admin ||= hbase.quotas_admin()
     end

+    def hbase_rsgroup_admin
+      @rsgroup_admin ||= hbase.rsgroup_admin()
+    end
+
     def export_commands(where)
       ::Shell.commands.keys.each do |cmd|
         # here where is the IRB namespace
@@ -444,3 +448,21 @@ Shell.load_command_group(
     set_visibility
   ]
 )
+
+Shell.load_command_group(
+  'rsgroup',
+  :full_name => 'RSGroups',
+  :comment => "NOTE: The above commands only apply when the cluster runs with region server groups enabled",
+  :commands => %w[
+    list_rsgroups
+    get_rsgroup
+    add_rsgroup
+    remove_rsgroup
+    balance_rsgroup
+    move_servers_rsgroup
+    move_tables_rsgroup
+    move_servers_tables_rsgroup
+    get_server_rsgroup
+    get_table_rsgroup
+  ]
+)
diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb
index d580f5e7952..102a6e190fc 100644
--- a/hbase-shell/src/main/ruby/shell/commands.rb
+++ b/hbase-shell/src/main/ruby/shell/commands.rb
@@ -81,6 +81,10 @@ module Shell
       @shell.hbase_quotas_admin
     end

+    def rsgroup_admin
+      @shell.hbase_rsgroup_admin
+    end
+
     #----------------------------------------------------------------------
     # Creates formatter instance first time and then reuses it.
     def formatter
diff --git a/hbase-shell/src/main/ruby/shell/commands/add_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/add_rsgroup.rb
new file mode 100644
index 00000000000..5a42e27b886
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/add_rsgroup.rb
@@ -0,0 +1,39 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class AddRsgroup < Command
+      def help
+        return <<-EOF
+Create a new region server group.
+ +Example: + + hbase> add_rsgroup 'my_group' +EOF + end + + def command(group_name) + rsgroup_admin.add_rs_group(group_name) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/balance_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/balance_rsgroup.rb new file mode 100644 index 00000000000..bee139fdade --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/balance_rsgroup.rb @@ -0,0 +1,37 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class BalanceRsgroup < Command + def help + return <<-EOF +Balance a region server group + + hbase> balance_rsgroup 'my_group' +EOF + end + + def command(group_name) + rsgroup_admin.balance_rs_group(group_name) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb new file mode 100644 index 00000000000..ad8a0e328c7 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb @@ -0,0 +1,43 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class GetRsgroup < Command + def help + return <<-EOF +Get a region server group's information. + +Example: + + hbase> get_rsgroup 'default' +EOF + end + + def command(group_name) + formatter.header(['GROUP INFORMATION']) + rsgroup_admin.get_rsgroup(group_name) do |s| + formatter.row([s]) + end + formatter.footer() + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb new file mode 100644 index 00000000000..9884cd18b01 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb @@ -0,0 +1,39 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class GetServerRsgroup < Command + def help + return <<-EOF +Get the group name the given region server is a member of. + + hbase> get_server_rsgroup 'server1:port1' +EOF + end + + def command(server) + group_name = rsgroup_admin.get_rsgroup_of_server(server).getName + formatter.row([group_name]) + formatter.footer(1) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb new file mode 100644 index 00000000000..650cda595d0 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb @@ -0,0 +1,40 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class GetTableRsgroup < Command + def help + return <<-EOF +Get the group name the given table is a member of. + + hbase> get_table_rsgroup 'myTable' +EOF + end + + def command(table) + group_name = + rsgroup_admin.get_rsgroup_of_table(table).getName + formatter.row([group_name]) + formatter.footer(1) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb b/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb new file mode 100644 index 00000000000..cabe84b37b9 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb @@ -0,0 +1,49 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class ListRsgroups < Command + def help + return <<-EOF +List all region server groups. Optional regular expression parameter could +be used to filter the output. + +Example: + + hbase> list_rsgroups + hbase> list_rsgroups 'abc.*' +EOF + end + + def command(regex = '.*') + formatter.header(['GROUPS']) + + regex = /#{regex}/ unless regex.is_a?(Regexp) + list = rsgroup_admin.list_rs_groups.grep(regex) + list.each do |group| + formatter.row([group]) + end + + formatter.footer(list.size) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/move_servers_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/move_servers_rsgroup.rb new file mode 100644 index 00000000000..1d36f8f1bd0 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/move_servers_rsgroup.rb @@ -0,0 +1,37 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class MoveServersRsgroup < Command + def help + return <<-EOF +Reassign a region server from one group to another. + + hbase> move_servers_rsgroup 'dest',['server1:port','server2:port'] +EOF + end + + def command(dest, servers) + rsgroup_admin.move_servers(dest, servers) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/move_servers_tables_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/move_servers_tables_rsgroup.rb new file mode 100644 index 00000000000..533714184ae --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/move_servers_tables_rsgroup.rb @@ -0,0 +1,37 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +module Shell + module Commands + class MoveServersTablesRsgroup < Command + def help + return <<-EOF +Reassign RegionServers and Tables from one group to another. + +Example: + + hbase> move_servers_tables_rsgroup 'dest',['server1:port','server2:port'],['table1','table2'] + +EOF + end + + def command(dest, servers, tables) + rsgroup_admin.move_servers_tables(dest, servers, tables) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/move_tables_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/move_tables_rsgroup.rb new file mode 100644 index 00000000000..5d3a75c0bf2 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/move_tables_rsgroup.rb @@ -0,0 +1,37 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class MoveTablesRsgroup < Command + def help + return <<-EOF +Reassign tables from one group to another. + + hbase> move_tables_rsgroup 'dest',['table1','table2'] +EOF + end + + def command(dest, tables) + rsgroup_admin.move_tables(dest, tables) + end + end + end +end diff --git a/hbase-shell/src/main/ruby/shell/commands/remove_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/remove_rsgroup.rb new file mode 100644 index 00000000000..94077322505 --- /dev/null +++ b/hbase-shell/src/main/ruby/shell/commands/remove_rsgroup.rb @@ -0,0 +1,37 @@ +# +# Copyright The Apache Software Foundation +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +module Shell + module Commands + class RemoveRsgroup < Command + def help + return <<-EOF +Remove a group. 
+ + hbase> remove_rsgroup 'my_group' +EOF + end + + def command(group_name) + rsgroup_admin.remove_rs_group(group_name) + end + end + end +end diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java index 976ba45078d..882b811cf55 100644 --- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java +++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShell.java @@ -31,7 +31,7 @@ public class TestShell extends AbstractTestShell { @Test public void testRunShellTests() throws IOException { - System.setProperty("shell.test.exclude", "replication_admin_test.rb"); + System.setProperty("shell.test.exclude", "replication_admin_test.rb,rsgroup_shell_test.rb"); // Start all ruby tests jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb"); } diff --git a/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java b/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java new file mode 100644 index 00000000000..be23a59b80d --- /dev/null +++ b/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java @@ -0,0 +1,111 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.client.rsgroup; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; +import org.apache.hadoop.hbase.rsgroup.RSGroupAdminEndpoint; +import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer; +import org.apache.hadoop.hbase.security.access.SecureTestUtil; +import org.apache.hadoop.hbase.security.visibility.VisibilityTestUtil; +import org.apache.hadoop.hbase.testclassification.ClientTests; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.jruby.embed.PathType; +import org.jruby.embed.ScriptingContainer; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +//Separate Shell test class for Groups +//Since we need to use a different balancer and run more than 1 RS +@Category({ClientTests.class, LargeTests.class}) +public class TestShellRSGroups { + final Log LOG = LogFactory.getLog(getClass()); + private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private final static ScriptingContainer jruby = new ScriptingContainer(); + private static String basePath; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + basePath = System.getProperty("basedir"); + + // Start mini cluster + TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true); + TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100); + TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250); + TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6); + TEST_UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false); + TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3); + TEST_UTIL.getConfiguration().setInt(HConstants.MASTER_INFO_PORT, -1); + TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_INFO_PORT, -1); + // Security setup configuration + SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration()); + VisibilityTestUtil.enableVisiblityLabels(TEST_UTIL.getConfiguration()); + + //Setup RegionServer Groups + TEST_UTIL.getConfiguration().set( + HConstants.HBASE_MASTER_LOADBALANCER_CLASS, + RSGroupBasedLoadBalancer.class.getName()); + TEST_UTIL.getConfiguration().set( + CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + RSGroupAdminEndpoint.class.getName()); + TEST_UTIL.getConfiguration().setBoolean( + HConstants.ZOOKEEPER_USEMULTI, + true); + + TEST_UTIL.startMiniCluster(1,4); + + // Configure jruby runtime + List loadPaths = new ArrayList(); + loadPaths.add(basePath+"/src/main/ruby"); + loadPaths.add(basePath+"/src/test/ruby"); + jruby.getProvider().setLoadPaths(loadPaths); + jruby.put("$TEST_CLUSTER", TEST_UTIL); + System.setProperty("jruby.jit.logging.verbose", "true"); + System.setProperty("jruby.jit.logging", "true"); + System.setProperty("jruby.native.verbose", "true"); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Test + public void testRunShellTests() throws IOException { + try { + // Start only GroupShellTest + System.setProperty("shell.test", "Hbase::RSGroupShellTest"); + jruby.runScriptlet(PathType.ABSOLUTE, + basePath + "/src/test/ruby/tests_runner.rb"); + } 
finally { + System.clearProperty("shell.test"); + } + } + +} + diff --git a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb new file mode 100644 index 00000000000..7542e6e83c5 --- /dev/null +++ b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb @@ -0,0 +1,96 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +require 'hbase' +require 'shell' + +module Hbase + class RSGroupShellTest < Test::Unit::TestCase + def setup + @hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration) + @shell = Shell::Shell.new(@hbase) + connection = $TEST_CLUSTER.getConnection + @rsgroup_admin = + org.apache.hadoop.hbase.rsgroup.RSGroupAdminClient.new(connection) + end + + define_test 'Test Basic RSGroup Commands' do + group_name = 'test_group' + table_name = 'test_table' + + @shell.command('create', table_name, 'f') + + @shell.command('add_rsgroup', group_name) + assert_not_nil(@rsgroup_admin.getRSGroupInfo(group_name)) + + @shell.command('remove_rsgroup', group_name) + assert_nil(@rsgroup_admin.getRSGroupInfo(group_name)) + + @shell.command('add_rsgroup', group_name) + group = @rsgroup_admin.getRSGroupInfo(group_name) + assert_not_nil(group) + assert_equal(0, group.getServers.count) + + address = @rsgroup_admin.getRSGroupInfo('default').getServers.iterator.next + @shell.command('get_rsgroup', 'default') + addressStr = address.toString + @shell.command('get_server_rsgroup', [addressStr]) + @shell.command('move_servers_rsgroup', + group_name, + [addressStr]) + assert_equal(1, @rsgroup_admin.getRSGroupInfo(group_name).getServers.count) + assert_equal(group_name, @rsgroup_admin.getRSGroupOfServer(address).getName) + + @shell.command('move_tables_rsgroup', + group_name, + [table_name]) + assert_equal(1, @rsgroup_admin.getRSGroupInfo(group_name).getTables.count) + + count = 0 + @hbase.rsgroup_admin.get_rsgroup(group_name) do |line| + case count + when 1 + assert_equal(addressStr, line) + when 3 + assert_equal(table_name, line) + end + count += 1 + end + assert_equal(4, count) + + assert_equal(2, @hbase.rsgroup_admin.list_rs_groups.count) + + # just run it to verify jruby->java api binding + @hbase.rsgroup_admin.balance_rs_group(group_name) + end + + # we test exceptions that could be thrown by the ruby wrappers + define_test 'Test bogus arguments' do + assert_raise(ArgumentError) do + @hbase.rsgroup_admin.get_rsgroup('foobar') + end + assert_raise(ArgumentError) do + @hbase.rsgroup_admin.get_rsgroup_of_server('foobar:123') + end + assert_raise(ArgumentError) do + @hbase.rsgroup_admin.get_rsgroup_of_table('foobar') + end + end + end +end diff --git a/hbase-shell/src/test/ruby/test_helper.rb b/hbase-shell/src/test/ruby/test_helper.rb index 179ee5bed48..61015b7e0f3 100644 --- 
a/hbase-shell/src/test/ruby/test_helper.rb
+++ b/hbase-shell/src/test/ruby/test_helper.rb
@@ -78,6 +78,10 @@ module Hbase
       @shell.hbase_replication_admin
     end

+    def group_admin
+      @shell.hbase_rsgroup_admin
+    end
+
     def create_test_table(name)
       # Create the table if needed
       unless admin.exists?(name)
diff --git a/pom.xml b/pom.xml
index 97acf43b860..0e1910d711b 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1420,6 +1420,18 @@
         <type>test-jar</type>
         <scope>test</scope>
       </dependency>
+      <dependency>
+        <artifactId>hbase-rsgroup</artifactId>
+        <groupId>org.apache.hbase</groupId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <artifactId>hbase-rsgroup</artifactId>
+        <groupId>org.apache.hbase</groupId>
+        <version>${project.version}</version>
+        <type>test-jar</type>
+        <scope>test</scope>
+      </dependency>
       <dependency>
         <artifactId>hbase-server</artifactId>
         <groupId>org.apache.hbase</groupId>
@@ -1935,6 +1947,17 @@
     -->
+    <profile>
+      <id>rsgroup</id>
+      <activation>
+        <property>
+          <name>!skip-rsgroup</name>
+        </property>
+      </activation>
+      <modules>
+        <module>hbase-rsgroup</module>
+      </modules>
+    </profile>
     <profile>
       <id>build-with-jdk8</id>
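For reviewers, a minimal end-to-end sketch of the Java API this patch introduces, mirroring
rsgroup_shell_test.rb above. It assumes a running cluster configured with
RSGroupBasedLoadBalancer and RSGroupAdminEndpoint, as set up in TestShellRSGroups; the
"batch" group and "t1" table names are illustrative only:

import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdminClient;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;

public class RSGroupUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      RSGroupAdminClient groups = new RSGroupAdminClient(conn);

      // Carve a new group out of the default group.
      groups.addRSGroup("batch");
      Address server = groups.getRSGroupInfo(RSGroupInfo.DEFAULT_GROUP)
          .getServers().iterator().next();
      groups.moveServers(Collections.singleton(server), "batch");

      // Pin a table to the new group and rebalance within it.
      groups.moveTables(Collections.singleton(TableName.valueOf("t1")), "batch");
      groups.balanceRSGroup("batch");
    }
  }
}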