diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
index 3d2285c3a13..2d8415c9f9d 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RSGroupAdminProtos.java
@@ -12369,6 +12369,1065 @@ public final class RSGroupAdminProtos {
// @@protoc_insertion_point(class_scope:hbase.pb.MoveServersAndTablesResponse)
}
+ public interface RemoveServersRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // repeated .hbase.pb.ServerName servers = 1;
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>
+ getServersList();
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index);
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ int getServersCount();
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+ getServersOrBuilderList();
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code hbase.pb.RemoveServersRequest}
+ */
+ public static final class RemoveServersRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements RemoveServersRequestOrBuilder {
+ // Use RemoveServersRequest.newBuilder() to construct.
+ private RemoveServersRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private RemoveServersRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final RemoveServersRequest defaultInstance;
+ public static RemoveServersRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public RemoveServersRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private RemoveServersRequest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ servers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>();
+ mutable_bitField0_ |= 0x00000001;
+ }
+ servers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+ servers_ = java.util.Collections.unmodifiableList(servers_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveServersRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveServersRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<RemoveServersRequest> PARSER =
+ new com.google.protobuf.AbstractParser<RemoveServersRequest>() {
+ public RemoveServersRequest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new RemoveServersRequest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<RemoveServersRequest> getParserForType() {
+ return PARSER;
+ }
+
+ // repeated .hbase.pb.ServerName servers = 1;
+ public static final int SERVERS_FIELD_NUMBER = 1;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> servers_;
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getServersList() {
+ return servers_;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+ getServersOrBuilderList() {
+ return servers_;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public int getServersCount() {
+ return servers_.size();
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index) {
+ return servers_.get(index);
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder(
+ int index) {
+ return servers_.get(index);
+ }
+
+ private void initFields() {
+ servers_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ for (int i = 0; i < getServersCount(); i++) {
+ if (!getServers(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ for (int i = 0; i < servers_.size(); i++) {
+ output.writeMessage(1, servers_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (int i = 0; i < servers_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, servers_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest) obj;
+
+ boolean result = true;
+ result = result && getServersList()
+ .equals(other.getServersList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (getServersCount() > 0) {
+ hash = (37 * hash) + SERVERS_FIELD_NUMBER;
+ hash = (53 * hash) + getServersList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.RemoveServersRequest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveServersRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveServersRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getServersFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (serversBuilder_ == null) {
+ servers_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ serversBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveServersRequest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest(this);
+ int from_bitField0_ = bitField0_;
+ if (serversBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ servers_ = java.util.Collections.unmodifiableList(servers_);
+ bitField0_ = (bitField0_ & ~0x00000001);
+ }
+ result.servers_ = servers_;
+ } else {
+ result.servers_ = serversBuilder_.build();
+ }
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest.getDefaultInstance()) return this;
+ if (serversBuilder_ == null) {
+ if (!other.servers_.isEmpty()) {
+ if (servers_.isEmpty()) {
+ servers_ = other.servers_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ ensureServersIsMutable();
+ servers_.addAll(other.servers_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.servers_.isEmpty()) {
+ if (serversBuilder_.isEmpty()) {
+ serversBuilder_.dispose();
+ serversBuilder_ = null;
+ servers_ = other.servers_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ serversBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getServersFieldBuilder() : null;
+ } else {
+ serversBuilder_.addAllMessages(other.servers_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ for (int i = 0; i < getServersCount(); i++) {
+ if (!getServers(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // repeated .hbase.pb.ServerName servers = 1;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> servers_ =
+ java.util.Collections.emptyList();
+ private void ensureServersIsMutable() {
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+ servers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>(servers_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serversBuilder_;
+
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getServersList() {
+ if (serversBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(servers_);
+ } else {
+ return serversBuilder_.getMessageList();
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public int getServersCount() {
+ if (serversBuilder_ == null) {
+ return servers_.size();
+ } else {
+ return serversBuilder_.getCount();
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServers(int index) {
+ if (serversBuilder_ == null) {
+ return servers_.get(index);
+ } else {
+ return serversBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder setServers(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+ if (serversBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureServersIsMutable();
+ servers_.set(index, value);
+ onChanged();
+ } else {
+ serversBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder setServers(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+ if (serversBuilder_ == null) {
+ ensureServersIsMutable();
+ servers_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ serversBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder addServers(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+ if (serversBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureServersIsMutable();
+ servers_.add(value);
+ onChanged();
+ } else {
+ serversBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder addServers(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+ if (serversBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureServersIsMutable();
+ servers_.add(index, value);
+ onChanged();
+ } else {
+ serversBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder addServers(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+ if (serversBuilder_ == null) {
+ ensureServersIsMutable();
+ servers_.add(builderForValue.build());
+ onChanged();
+ } else {
+ serversBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder addServers(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+ if (serversBuilder_ == null) {
+ ensureServersIsMutable();
+ servers_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ serversBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder addAllServers(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> values) {
+ if (serversBuilder_ == null) {
+ ensureServersIsMutable();
+ super.addAll(values, servers_);
+ onChanged();
+ } else {
+ serversBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder clearServers() {
+ if (serversBuilder_ == null) {
+ servers_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ } else {
+ serversBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public Builder removeServers(int index) {
+ if (serversBuilder_ == null) {
+ ensureServersIsMutable();
+ servers_.remove(index);
+ onChanged();
+ } else {
+ serversBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServersBuilder(
+ int index) {
+ return getServersFieldBuilder().getBuilder(index);
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServersOrBuilder(
+ int index) {
+ if (serversBuilder_ == null) {
+ return servers_.get(index); } else {
+ return serversBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+ getServersOrBuilderList() {
+ if (serversBuilder_ != null) {
+ return serversBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(servers_);
+ }
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServersBuilder() {
+ return getServersFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServersBuilder(
+ int index) {
+ return getServersFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
+ }
+ /**
+ * repeated .hbase.pb.ServerName servers = 1;
+ */
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder>
+ getServersBuilderList() {
+ return getServersFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+ getServersFieldBuilder() {
+ if (serversBuilder_ == null) {
+ serversBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
+ servers_,
+ ((bitField0_ & 0x00000001) == 0x00000001),
+ getParentForChildren(),
+ isClean());
+ servers_ = null;
+ }
+ return serversBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.RemoveServersRequest)
+ }
+
+ static {
+ defaultInstance = new RemoveServersRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.RemoveServersRequest)
+ }
+
+ public interface RemoveServersResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+ }
+ /**
+ * Protobuf type {@code hbase.pb.RemoveServersResponse}
+ */
+ public static final class RemoveServersResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements RemoveServersResponseOrBuilder {
+ // Use RemoveServersResponse.newBuilder() to construct.
+ private RemoveServersResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private RemoveServersResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final RemoveServersResponse defaultInstance;
+ public static RemoveServersResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public RemoveServersResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private RemoveServersResponse(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveServersResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveServersResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<RemoveServersResponse> PARSER =
+ new com.google.protobuf.AbstractParser<RemoveServersResponse>() {
+ public RemoveServersResponse parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new RemoveServersResponse(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<RemoveServersResponse> getParserForType() {
+ return PARSER;
+ }
+
+ private void initFields() {
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse other = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse) obj;
+
+ boolean result = true;
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.RemoveServersResponse}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveServersResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveServersResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.internal_static_hbase_pb_RemoveServersResponse_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse result = new org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse(this);
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse.getDefaultInstance()) return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.RemoveServersResponse)
+ }
+
+ static {
+ defaultInstance = new RemoveServersResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.RemoveServersResponse)
+ }
+
/**
* Protobuf service {@code hbase.pb.RSGroupAdminService}
*/
@@ -12457,6 +13516,14 @@ public final class RSGroupAdminProtos {
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse> done);
+ /**
+ * rpc RemoveServers(.hbase.pb.RemoveServersRequest) returns (.hbase.pb.RemoveServersResponse);
+ */
+ public abstract void removeServers(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse> done);
+
}
public static com.google.protobuf.Service newReflectiveService(
@@ -12542,6 +13609,14 @@ public final class RSGroupAdminProtos {
impl.moveServersAndTables(controller, request, done);
}
+ @java.lang.Override
+ public void removeServers(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse> done) {
+ impl.removeServers(controller, request, done);
+ }
+
};
}
@@ -12584,6 +13659,8 @@ public final class RSGroupAdminProtos {
return impl.listRSGroupInfos(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest)request);
case 9:
return impl.moveServersAndTables(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest)request);
+ case 10:
+ return impl.removeServers(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -12618,6 +13695,8 @@ public final class RSGroupAdminProtos {
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest.getDefaultInstance();
case 9:
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.getDefaultInstance();
+ case 10:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -12652,6 +13731,8 @@ public final class RSGroupAdminProtos {
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.getDefaultInstance();
case 9:
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance();
+ case 10:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -12740,6 +13821,14 @@ public final class RSGroupAdminProtos {
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse> done);
+ /**
+ * rpc RemoveServers(.hbase.pb.RemoveServersRequest) returns (.hbase.pb.RemoveServersResponse);
+ */
+ public abstract void removeServers(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse> done);
+
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
@@ -12812,6 +13901,11 @@ public final class RSGroupAdminProtos {
com.google.protobuf.RpcUtil.specializeCallback(
done));
return;
+ case 10:
+ this.removeServers(controller, (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest)request,
+ com.google.protobuf.RpcUtil.specializeCallback(
+ done));
+ return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -12846,6 +13940,8 @@ public final class RSGroupAdminProtos {
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosRequest.getDefaultInstance();
case 9:
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest.getDefaultInstance();
+ case 10:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -12880,6 +13976,8 @@ public final class RSGroupAdminProtos {
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.ListRSGroupInfosResponse.getDefaultInstance();
case 9:
return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance();
+ case 10:
+ return org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -13050,6 +14148,21 @@ public final class RSGroupAdminProtos {
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.class,
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance()));
}
+
+ public void removeServers(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(10),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse.getDefaultInstance()));
+ }
}
public static BlockingInterface newBlockingStub(
@@ -13107,6 +14220,11 @@ public final class RSGroupAdminProtos {
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesRequest request)
throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse removeServers(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest request)
+ throws com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
@@ -13235,6 +14353,18 @@ public final class RSGroupAdminProtos {
org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServersAndTablesResponse.getDefaultInstance());
}
+
+ public org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse removeServers(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(10),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse.getDefaultInstance());
+ }
+
}
// @@protoc_insertion_point(class_scope:hbase.pb.RSGroupAdminService)
@@ -13350,6 +14480,16 @@ public final class RSGroupAdminProtos {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_hbase_pb_MoveServersAndTablesResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_RemoveServersRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_RemoveServersRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_RemoveServersResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_RemoveServersResponse_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -13392,31 +14532,36 @@ public final class RSGroupAdminProtos {
"quest\022\024\n\014target_group\030\001 \002(\t\022%\n\007servers\030\002" +
" \003(\0132\024.hbase.pb.ServerName\022\'\n\ntable_name" +
"\030\003 \003(\0132\023.hbase.pb.TableName\"\036\n\034MoveServe" +
- "rsAndTablesResponse2\210\007\n\023RSGroupAdminServ" +
- "ice\022S\n\016GetRSGroupInfo\022\037.hbase.pb.GetRSGr" +
- "oupInfoRequest\032 .hbase.pb.GetRSGroupInfo" +
- "Response\022h\n\025GetRSGroupInfoOfTable\022&.hbas" +
- "e.pb.GetRSGroupInfoOfTableRequest\032\'.hbas" +
- "e.pb.GetRSGroupInfoOfTableResponse\022k\n\026Ge" +
- "tRSGroupInfoOfServer\022\'.hbase.pb.GetRSGro",
- "upInfoOfServerRequest\032(.hbase.pb.GetRSGr" +
- "oupInfoOfServerResponse\022J\n\013MoveServers\022\034" +
- ".hbase.pb.MoveServersRequest\032\035.hbase.pb." +
- "MoveServersResponse\022G\n\nMoveTables\022\033.hbas" +
- "e.pb.MoveTablesRequest\032\034.hbase.pb.MoveTa" +
- "blesResponse\022G\n\nAddRSGroup\022\033.hbase.pb.Ad" +
- "dRSGroupRequest\032\034.hbase.pb.AddRSGroupRes" +
- "ponse\022P\n\rRemoveRSGroup\022\036.hbase.pb.Remove" +
- "RSGroupRequest\032\037.hbase.pb.RemoveRSGroupR" +
- "esponse\022S\n\016BalanceRSGroup\022\037.hbase.pb.Bal",
- "anceRSGroupRequest\032 .hbase.pb.BalanceRSG" +
- "roupResponse\022Y\n\020ListRSGroupInfos\022!.hbase" +
- ".pb.ListRSGroupInfosRequest\032\".hbase.pb.L" +
- "istRSGroupInfosResponse\022e\n\024MoveServersAn" +
- "dTables\022%.hbase.pb.MoveServersAndTablesR" +
- "equest\032&.hbase.pb.MoveServersAndTablesRe" +
- "sponseBH\n*org.apache.hadoop.hbase.protob" +
- "uf.generatedB\022RSGroupAdminProtosH\001\210\001\001\240\001\001"
+ "rsAndTablesResponse\"=\n\024RemoveServersRequ" +
+ "est\022%\n\007servers\030\001 \003(\0132\024.hbase.pb.ServerNa" +
+ "me\"\027\n\025RemoveServersResponse2\332\007\n\023RSGroupA" +
+ "dminService\022S\n\016GetRSGroupInfo\022\037.hbase.pb" +
+ ".GetRSGroupInfoRequest\032 .hbase.pb.GetRSG" +
+ "roupInfoResponse\022h\n\025GetRSGroupInfoOfTabl" +
+ "e\022&.hbase.pb.GetRSGroupInfoOfTableReques",
+ "t\032\'.hbase.pb.GetRSGroupInfoOfTableRespon" +
+ "se\022k\n\026GetRSGroupInfoOfServer\022\'.hbase.pb." +
+ "GetRSGroupInfoOfServerRequest\032(.hbase.pb" +
+ ".GetRSGroupInfoOfServerResponse\022J\n\013MoveS" +
+ "ervers\022\034.hbase.pb.MoveServersRequest\032\035.h" +
+ "base.pb.MoveServersResponse\022G\n\nMoveTable" +
+ "s\022\033.hbase.pb.MoveTablesRequest\032\034.hbase.p" +
+ "b.MoveTablesResponse\022G\n\nAddRSGroup\022\033.hba" +
+ "se.pb.AddRSGroupRequest\032\034.hbase.pb.AddRS" +
+ "GroupResponse\022P\n\rRemoveRSGroup\022\036.hbase.p",
+ "b.RemoveRSGroupRequest\032\037.hbase.pb.Remove" +
+ "RSGroupResponse\022S\n\016BalanceRSGroup\022\037.hbas" +
+ "e.pb.BalanceRSGroupRequest\032 .hbase.pb.Ba" +
+ "lanceRSGroupResponse\022Y\n\020ListRSGroupInfos" +
+ "\022!.hbase.pb.ListRSGroupInfosRequest\032\".hb" +
+ "ase.pb.ListRSGroupInfosResponse\022e\n\024MoveS" +
+ "erversAndTables\022%.hbase.pb.MoveServersAn" +
+ "dTablesRequest\032&.hbase.pb.MoveServersAnd" +
+ "TablesResponse\022P\n\rRemoveServers\022\036.hbase." +
+ "pb.RemoveServersRequest\032\037.hbase.pb.Remov",
+ "eServersResponseBH\n*org.apache.hadoop.hb" +
+ "ase.protobuf.generatedB\022RSGroupAdminProt" +
+ "osH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -13555,6 +14700,18 @@ public final class RSGroupAdminProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_MoveServersAndTablesResponse_descriptor,
new java.lang.String[] { });
+ internal_static_hbase_pb_RemoveServersRequest_descriptor =
+ getDescriptor().getMessageTypes().get(22);
+ internal_static_hbase_pb_RemoveServersRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_RemoveServersRequest_descriptor,
+ new java.lang.String[] { "Servers", });
+ internal_static_hbase_pb_RemoveServersResponse_descriptor =
+ getDescriptor().getMessageTypes().get(23);
+ internal_static_hbase_pb_RemoveServersResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_RemoveServersResponse_descriptor,
+ new java.lang.String[] { });
return null;
}
};
diff --git a/hbase-protocol/src/main/protobuf/RSGroupAdmin.proto b/hbase-protocol/src/main/protobuf/RSGroupAdmin.proto
index 65da657e092..fbd55ad2213 100644
--- a/hbase-protocol/src/main/protobuf/RSGroupAdmin.proto
+++ b/hbase-protocol/src/main/protobuf/RSGroupAdmin.proto
@@ -116,6 +116,13 @@ message MoveServersAndTablesRequest {
message MoveServersAndTablesResponse {
}
+message RemoveServersRequest {
+ repeated ServerName servers = 1;
+}
+
+message RemoveServersResponse {
+}
+
service RSGroupAdminService {
rpc GetRSGroupInfo(GetRSGroupInfoRequest)
returns (GetRSGroupInfoResponse);
@@ -146,4 +153,7 @@ service RSGroupAdminService {
rpc MoveServersAndTables(MoveServersAndTablesRequest)
returns (MoveServersAndTablesResponse);
+
+ rpc RemoveServers(RemoveServersRequest)
+ returns (RemoveServersResponse);
}
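
For reference, a minimal sketch (not part of the patch) of how the regenerated classes for the new messages and RPC are exercised from Java, mirroring the builders and blocking-stub call used by RSGroupAdminClient further below; the host name and the stub variable are assumptions for illustration.

  // Illustrative sketch only: build a RemoveServersRequest for one region server and
  // issue the new RPC through a blocking stub (RSGroupAdminService.BlockingInterface).
  RemoveServersRequest request = RemoveServersRequest.newBuilder()
      .addServers(HBaseProtos.ServerName.newBuilder()
          .setHostName("decommissioned-host.example.com")  // hypothetical host
          .setPort(16020)
          .build())
      .build();
  RemoveServersResponse response = stub.removeServers(null, request);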
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
index f465f834754..ab20ae9975a 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdmin.java
@@ -89,4 +89,14 @@ public interface RSGroupAdmin extends Closeable {
*/
void moveServersAndTables(Set<Address> servers, Set<TableName> tables,
String targetGroup) throws IOException;
+
+ /**
+ * Remove decommissioned servers from an rsgroup.
+ * 1. Sometimes a server is aborted because of a hardware failure and must be taken offline
+ * for repair, or some servers need to be moved to join another cluster; in those cases the
+ * decommissioned servers should be removed from their rsgroup.
+ * 2. Dead, recovering, and live servers are not allowed to be removed.
+ * @param servers set of servers to remove
+ */
+ void removeServers(Set<Address> servers) throws IOException;
}
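
For reference, a hedged usage sketch (not part of the patch) of the new interface method; the RSGroupAdmin instance and host name are assumptions, and Address.fromParts is used the same way as in RSGroupAdminEndpoint below.

  // Illustrative sketch only: remove a decommissioned server from its rsgroup after it has
  // been stopped and cleared from the dead servers list (e.g. via clear_deadservers).
  Set<Address> toRemove = Sets.newHashSet(
      Address.fromParts("decommissioned-host.example.com", 16020));  // hypothetical server
  rsGroupAdmin.removeServers(toRemove);  // rsGroupAdmin: any RSGroupAdmin implementation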
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
index 8d9df44873f..b25752ef048 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminClient.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveServers
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
import com.google.common.collect.Sets;
@@ -206,6 +207,25 @@ class RSGroupAdminClient implements RSGroupAdmin {
}
}
+ @Override
+ public void removeServers(Set<Address> servers) throws IOException {
+ Set<HBaseProtos.ServerName> hostPorts = Sets.newHashSet();
+ for(Address el: servers) {
+ hostPorts.add(HBaseProtos.ServerName.newBuilder()
+ .setHostName(el.getHostname())
+ .setPort(el.getPort())
+ .build());
+ }
+ RemoveServersRequest request = RemoveServersRequest.newBuilder()
+ .addAllServers(hostPorts)
+ .build();
+ try {
+ stub.removeServers(null, request);
+ } catch (ServiceException e) {
+ throw ProtobufUtil.handleRemoteException(e);
+ }
+ }
+
@Override
public void close() throws IOException {
}
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index 65892fa0d8a..b84465175ca 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -80,6 +80,8 @@ import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesR
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveServersResponse;
import org.apache.hadoop.hbase.protobuf.generated.TableProtos;
public class RSGroupAdminEndpoint extends RSGroupAdminService
@@ -312,6 +314,24 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService
done.run(builder.build());
}
+ @Override
+ public void removeServers(RpcController controller,
+ RemoveServersRequest request,
+ RpcCallback<RemoveServersResponse> done) {
+ RemoveServersResponse.Builder builder =
+ RemoveServersResponse.newBuilder();
+ try {
+ Set<Address> servers = Sets.newHashSet();
+ for (HBaseProtos.ServerName el : request.getServersList()) {
+ servers.add(Address.fromParts(el.getHostName(), el.getPort()));
+ }
+ groupAdminServer.removeServers(servers);
+ } catch (IOException e) {
+ ResponseConverter.setControllerException(controller, e);
+ }
+ done.run(builder.build());
+ }
+
void assignTableToGroup(HTableDescriptor desc) throws IOException {
String groupName =
master.getNamespaceDescriptor(desc.getTableName().getNamespaceAsString())
@@ -971,8 +991,15 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService
@Override
public void postClearDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
- List<ServerName> servers, List<ServerName> notClearedServers) throws IOException {
-
+ List<ServerName> servers, List<ServerName> notClearedServers)
+ throws IOException {
+ Set<Address> clearedServer = Sets.newHashSet();
+ for (ServerName server: servers) {
+ if (!notClearedServers.contains(server)) {
+ clearedServer.add(server.getAddress());
+ }
+ }
+ groupAdminServer.removeServers(clearedServer);
}
@Override
@@ -1009,6 +1036,16 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService
Set<Address> servers, Set<TableName> tables, String targetGroup) throws IOException {
}
+ @Override
+ public void preRemoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers) throws IOException {
+ }
+
+ @Override
+ public void postRemoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers) throws IOException {
+ }
+
@Override
public void preAddRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
String name) throws IOException {
@@ -1044,5 +1081,4 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService
String groupName, boolean balancerRan) throws IOException {
}
-
}
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index 0003df02431..f3d03b5b267 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -68,12 +68,12 @@ public class RSGroupAdminServer implements RSGroupAdmin {
//Key=host:port,Value=targetGroup
private ConcurrentMap<Address, String> serversInTransition =
new ConcurrentHashMap<>();
- private RSGroupInfoManager RSGroupInfoManager;
+ private RSGroupInfoManager rsGroupInfoManager;
public RSGroupAdminServer(MasterServices master,
RSGroupInfoManager RSGroupInfoManager) throws IOException {
this.master = master;
- this.RSGroupInfoManager = RSGroupInfoManager;
+ this.rsGroupInfoManager = RSGroupInfoManager;
}
@Override
@@ -412,7 +412,30 @@ public class RSGroupAdminServer implements RSGroupAdmin {
@InterfaceAudience.Private
public RSGroupInfoManager getRSGroupInfoManager() throws IOException {
- return RSGroupInfoManager;
+ return rsGroupInfoManager;
+ }
+
+ @Override
+ public void removeServers(Set<Address> servers) throws IOException {
+ {
+ if (servers == null || servers.isEmpty()) {
+ throw new ConstraintException("The set of servers to remove cannot be null or empty.");
+ }
+ // Hold a lock on the manager instance while removing servers to prevent
+ // another writer from changing our state while we are working.
+ synchronized (rsGroupInfoManager) {
+ if (master.getMasterCoprocessorHost() != null) {
+ master.getMasterCoprocessorHost().preRemoveServers(servers);
+ }
+ //check that the servers are neither online nor on the dead servers list
+ checkForDeadOrOnlineServers(servers);
+ rsGroupInfoManager.removeServers(servers);
+ if (master.getMasterCoprocessorHost() != null) {
+ master.getMasterCoprocessorHost().postRemoveServers(servers);
+ }
+ LOG.info("Remove decommissioned servers " + servers + " from rsgroup done.");
+ }
+ }
}
private Map<String, RegionState> rsGroupGetRegionsInTransition(String groupName)
@@ -520,4 +543,33 @@ public class RSGroupAdminServer implements RSGroupAdmin {
@Override
public void close() throws IOException {
}
+
+ /**
+ * Check that none of the given servers is in the dead servers list or the online servers list.
+ * @param servers servers to remove
+ */
+ private void checkForDeadOrOnlineServers(Set<Address> servers) throws ConstraintException {
+ // This ugliness is because we only have Address, not ServerName.
+ Set<Address> onlineServers = new HashSet<>();
+ for(ServerName server: master.getServerManager().getOnlineServers().keySet()) {
+ onlineServers.add(server.getAddress());
+ }
+
+ Set<Address> deadServers = new HashSet<>();
+ for(ServerName server: master.getServerManager().getDeadServers().copyServerNames()) {
+ deadServers.add(server.getAddress());
+ }
+
+ for (Address address: servers) {
+ if (onlineServers.contains(address)) {
+ throw new ConstraintException(
+ "Server " + address + " is an online server, not allowed to remove.");
+ }
+ if (deadServers.contains(address)) {
+ throw new ConstraintException(
+ "Server " + address + " is on the dead servers list,"
+ + " Maybe it will come back again, not allowed to remove.");
+ }
+ }
+ }
}
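The check above gives removeServers a strict precondition: a server can be removed only once it is neither online nor still tracked on the dead servers list. A hedged sketch of what a caller sees when that precondition is violated; the handle and the address are illustrative, and the exception type follows the code above.

import java.io.IOException;

import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.rsgroup.RSGroupAdmin;

import com.google.common.collect.Sets;

public class RemoveServersPreconditionExample {
  static boolean tryRemove(RSGroupAdmin rsGroupAdmin, Address server) {
    try {
      rsGroupAdmin.removeServers(Sets.newHashSet(server));
      return true;
    } catch (IOException e) {
      // A ConstraintException (an IOException subclass) reports the reason: the
      // server is still online, still on the dead servers list, or the set was empty.
      return false;
    }
  }
}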
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
index 2330605d736..d8496992deb 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
@@ -117,4 +117,10 @@ public interface RSGroupInfoManager {
*/
void moveServersAndTables(Set<Address> servers, Set<TableName> tables,
String srcGroup, String dstGroup) throws IOException;
+
+ /**
+ * Remove decommissioned servers from their rsgroup.
+ * @param servers set of servers to remove
+ */
+ void removeServers(Set<Address> servers) throws IOException;
}
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index cfaa632bd61..350d8505045 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -874,4 +874,29 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
newGroupMap.put(dstGroupInfo.getName(), dstGroupInfo);
flushConfig(newGroupMap);
}
+
+ @Override
+ public synchronized void removeServers(Set<Address> servers) throws IOException {
+ Map<String, RSGroupInfo> rsGroupInfos = new HashMap<String, RSGroupInfo>();
+ for (Address el: servers) {
+ RSGroupInfo rsGroupInfo = getRSGroupOfServer(el);
+ if (rsGroupInfo != null) {
+ RSGroupInfo newRsGroupInfo = rsGroupInfos.get(rsGroupInfo.getName());
+ if (newRsGroupInfo == null) {
+ rsGroupInfo.removeServer(el);
+ rsGroupInfos.put(rsGroupInfo.getName(), rsGroupInfo);
+ } else {
+ newRsGroupInfo.removeServer(el);
+ rsGroupInfos.put(newRsGroupInfo.getName(), newRsGroupInfo);
+ }
+ } else {
+ LOG.warn("Server " + el + " does not belong to any rsgroup.");
+ }
+ }
+ if (rsGroupInfos.size() > 0) {
+ Map<String, RSGroupInfo> newGroupMap = Maps.newHashMap(rsGroupMap);
+ newGroupMap.putAll(rsGroupInfos);
+ flushConfig(newGroupMap);
+ }
+ }
}
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
index 3ad928fb28b..d3c546e7123 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroups.java
@@ -78,10 +78,10 @@ public class TestRSGroups extends TestRSGroupsBase {
TEST_UTIL.getConfiguration().setBoolean(
HConstants.ZOOKEEPER_USEMULTI,
true);
- TEST_UTIL.startMiniCluster(NUM_SLAVES_BASE);
- TEST_UTIL.getConfiguration().set(
+ TEST_UTIL.startMiniCluster(NUM_SLAVES_BASE - 1);
+ TEST_UTIL.getConfiguration().setInt(
ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
- ""+NUM_SLAVES_BASE);
+ NUM_SLAVES_BASE - 1);
TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
admin = TEST_UTIL.getHBaseAdmin();
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index 4c538ec08ae..ac82e4c4c5d 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -19,8 +19,21 @@
*/
package org.apache.hadoop.hbase.rsgroup;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.security.SecureRandom;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -35,30 +48,23 @@ import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoRequest;
import org.apache.hadoop.hbase.util.Bytes;
+
import org.junit.Assert;
import org.junit.Test;
-import java.io.IOException;
-import java.security.SecureRandom;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
public abstract class TestRSGroupsBase {
protected static final Log LOG = LogFactory.getLog(TestRSGroupsBase.class);
@@ -854,4 +860,109 @@ public abstract class TestRSGroupsBase {
Assert.assertEquals(newGroup.getName(),
rsGroupAdmin.getRSGroupInfoOfTable(tableName).getName());
}
-}
+
+ @Test
+ public void testClearDeadServers() throws Exception {
+ final RSGroupInfo newGroup = addGroup(rsGroupAdmin, "testClearDeadServers", 3);
+
+ ServerName targetServer = ServerName.parseServerName(
+ newGroup.getServers().iterator().next().toString());
+ AdminProtos.AdminService.BlockingInterface targetRS =
+ ((ClusterConnection) admin.getConnection()).getAdmin(targetServer);
+ try {
+ targetServer = ProtobufUtil.toServerName(targetRS.getServerInfo(null,
+ GetServerInfoRequest.newBuilder().build()).getServerInfo().getServerName());
+ //stopping may cause an exception
+ //due to the connection loss
+ targetRS.stopServer(null,
+ AdminProtos.StopServerRequest.newBuilder().setReason("Die").build());
+ } catch(Exception e) {
+ }
+ final HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+ //wait for the stopped regionserver to be added to the dead servers list
+ TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+ @Override
+ public boolean evaluate() throws Exception {
+ return !master.getServerManager().areDeadServersInProgress()
+ && cluster.getClusterStatus().getDeadServerNames().size() > 0;
+ }
+ });
+ assertFalse(cluster.getClusterStatus().getServers().contains(targetServer));
+ assertTrue(cluster.getClusterStatus().getDeadServerNames().contains(targetServer));
+ assertTrue(newGroup.getServers().contains(targetServer.getAddress()));
+
+ //clear dead servers list
+ List<ServerName> notClearedServers = admin.clearDeadServers(Lists.newArrayList(targetServer));
+ assertEquals(0, notClearedServers.size());
+
+ Set<Address> newGroupServers = rsGroupAdmin.getRSGroupInfo(newGroup.getName()).getServers();
+ assertFalse(newGroupServers.contains(targetServer.getAddress()));
+ assertEquals(2, newGroupServers.size());
+ }
+
+ @Test
+ public void testRemoveServers() throws Exception {
+ final RSGroupInfo newGroup = addGroup(rsGroupAdmin, "testRemoveServers", 3);
+ ServerName targetServer = ServerName.parseServerName(
+ newGroup.getServers().iterator().next().toString());
+ try {
+ rsGroupAdmin.removeServers(Sets.newHashSet(targetServer.getAddress()));
+ fail("Online servers shouldn't have been successfully removed.");
+ } catch(IOException ex) {
+ String exp = "Server " + targetServer.getAddress()
+ + " is an online server, not allowed to remove.";
+ String msg = "Expected '" + exp + "' in exception message: ";
+ assertTrue(msg + " " + ex.getMessage(), ex.getMessage().contains(exp));
+ }
+ assertTrue(newGroup.getServers().contains(targetServer.getAddress()));
+
+ AdminProtos.AdminService.BlockingInterface targetRS =
+ ((ClusterConnection) admin.getConnection()).getAdmin(targetServer);
+ try {
+ targetServer = ProtobufUtil.toServerName(targetRS.getServerInfo(null,
+ GetServerInfoRequest.newBuilder().build()).getServerInfo().getServerName());
+ //stopping may cause an exception
+ //due to the connection loss
+ targetRS.stopServer(null,
+ AdminProtos.StopServerRequest.newBuilder().setReason("Die").build());
+ } catch(Exception e) {
+ }
+
+ final HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+ //wait for the stopped regionserver to be added to the dead servers list
+ TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+ @Override
+ public boolean evaluate() throws Exception {
+ return !master.getServerManager().areDeadServersInProgress()
+ && cluster.getClusterStatus().getDeadServerNames().size() > 0;
+ }
+ });
+
+ try {
+ rsGroupAdmin.removeServers(Sets.newHashSet(targetServer.getAddress()));
+ fail("Dead servers shouldn't have been successfully removed.");
+ } catch(IOException ex) {
+ String exp = "Server " + targetServer.getAddress() + " is on the dead servers list,"
+ + " Maybe it will come back again, not allowed to remove.";
+ String msg = "Expected '" + exp + "' in exception message: ";
+ assertTrue(msg + " " + ex.getMessage(), ex.getMessage().contains(exp));
+ }
+ assertTrue(newGroup.getServers().contains(targetServer.getAddress()));
+
+ ServerName sn = TEST_UTIL.getHBaseClusterInterface().getClusterStatus().getMaster();
+ TEST_UTIL.getHBaseClusterInterface().stopMaster(sn);
+ TEST_UTIL.getHBaseClusterInterface().waitForMasterToStop(sn, 60000);
+ TEST_UTIL.getHBaseClusterInterface().startMaster(sn.getHostname(), 0);
+ TEST_UTIL.getHBaseClusterInterface().waitForActiveAndReadyMaster(60000);
+
+ assertEquals(3, cluster.getClusterStatus().getServersSize());
+ assertFalse(cluster.getClusterStatus().getServers().contains(targetServer));
+ assertFalse(cluster.getClusterStatus().getDeadServerNames().contains(targetServer));
+ assertTrue(newGroup.getServers().contains(targetServer.getAddress()));
+
+ rsGroupAdmin.removeServers(Sets.newHashSet(targetServer.getAddress()));
+ Set<Address> newGroupServers = rsGroupAdmin.getRSGroupInfo(newGroup.getName()).getServers();
+ assertFalse(newGroupServers.contains(targetServer.getAddress()));
+ assertEquals(2, newGroupServers.size());
+ }
+}
\ No newline at end of file
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
index f5d02f0c647..09a171684e4 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
@@ -104,6 +104,12 @@ public class VerifyingRSGroupAdminClient implements RSGroupAdmin {
return wrapped.getRSGroupOfServer(server);
}
+ @Override
+ public void removeServers(Set<Address> servers) throws IOException {
+ wrapped.removeServers(servers);
+ verify();
+ }
+
public void verify() throws IOException {
Map<String, RSGroupInfo> groupMap = Maps.newHashMap();
Set<RSGroupInfo> zList = Sets.newHashSet();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
index de09f71498d..1f9f4ab34a6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
@@ -665,4 +665,14 @@ public class BaseMasterAndRegionObserver extends BaseRegionObserver
public void preRemoveRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String name)
throws IOException {
}
+
+ @Override
+ public void preRemoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers) throws IOException {
+ }
+
+ @Override
+ public void postRemoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers) throws IOException {
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
index 7d962abb3a0..4e39305927b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
@@ -656,6 +656,16 @@ public class BaseMasterObserver implements MasterObserver {
throws IOException {
}
+ @Override
+ public void preRemoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers) throws IOException {
+ }
+
+ @Override
+ public void postRemoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers) throws IOException {
+ }
+
@Override
public void preBalanceRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx, String groupName)
throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 6e9179aa9da..ed40e672c84 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -1164,6 +1164,22 @@ public interface MasterObserver extends Coprocessor {
void postRemoveRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
String name) throws IOException;
+ /**
+ * Called before servers are removed from rsgroup
+ * @param ctx the environment to interact with the framework and master
+ * @param servers set of decommissioned servers to remove
+ */
+ void preRemoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers) throws IOException;
+
+ /**
+ * Called after servers are removed from rsgroup
+ * @param ctx the environment to interact with the framework and master
+ * @param servers set of servers that were removed
+ */
+ void postRemoveServers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers) throws IOException;
+
/**
* Called before a region server group is removed
* @param ctx the environment to interact with the framework and master
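Downstream coprocessors can hook these two new callbacks. A minimal sketch of a custom observer that vetoes removal of a protected host; the class name, hostname, and port are made up for illustration, and a real deployment would register the observer via hbase.coprocessor.master.classes.

import java.io.IOException;
import java.util.Set;

import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.net.Address;

public class ProtectedServersObserver extends BaseMasterObserver {
  private static final Address PROTECTED =
      Address.fromParts("protected-host.example.com", 16020);

  @Override
  public void preRemoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
      Set<Address> servers) throws IOException {
    // Throwing here aborts removeServers before any rsgroup state is changed.
    if (servers.contains(PROTECTED)) {
      throw new IOException("Removal of " + PROTECTED + " is not permitted");
    }
  }

  @Override
  public void postRemoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
      Set<Address> servers) throws IOException {
    // Runs after the servers have been dropped from their rsgroup; useful for auditing.
  }
}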
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 616f76c6939..6965eae3778 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -1297,6 +1297,32 @@ public class MasterCoprocessorHost
});
}
+ public void preRemoveServers(final Set<Address> servers)
+ throws IOException {
+ execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+ @Override
+ public void call(MasterObserver oserver,
+ ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+ if(((MasterEnvironment)getEnvironment()).supportGroupCPs) {
+ oserver.preRemoveServers(this, servers);
+ }
+ }
+ });
+ }
+
+ public void postRemoveServers(final Set<Address> servers)
+ throws IOException {
+ execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+ @Override
+ public void call(MasterObserver oserver,
+ ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+ if(((MasterEnvironment)getEnvironment()).supportGroupCPs) {
+ oserver.postRemoveServers(this, servers);
+ }
+ }
+ });
+ }
+
public void preAddRSGroup(final String name)
throws IOException {
execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 5865b1ae3fe..e90fe053a5d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -2718,6 +2718,12 @@ public class AccessController extends BaseMasterAndRegionObserver
requirePermission("moveTables", Action.ADMIN);
}
+ @Override
+ public void preRemoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers) throws IOException {
+ requirePermission("removeServers", Action.ADMIN);
+ }
+
@Override
public void preAddRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
String name) throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index cf899b84681..83d5749d596 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -1331,6 +1331,16 @@ public class TestMasterObserver {
throws IOException {
}
+ @Override
+ public void preRemoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers) throws IOException {
+ }
+
+ @Override
+ public void postRemoveServers(ObserverContext<MasterCoprocessorEnvironment> ctx,
+ Set<Address> servers) throws IOException {
+ }
+
@Override
public void preBalanceRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
String groupName) throws IOException {
diff --git a/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb b/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb
index 0b72fd8ff56..f5f4e620ad1 100644
--- a/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb
@@ -160,5 +160,16 @@ module Hbase
@admin.moveServersAndTables(servers, tables, dest)
end
+ #--------------------------------------------------------------------------
+ # remove decommissioned servers from rsgroup
+ def remove_servers(*args)
+ # Flatten params array
+ args = args.flatten.compact
+ servers = java.util.HashSet.new
+ args.each do |s|
+ servers.add(org.apache.hadoop.hbase.net.Address.fromString(s))
+ end
+ @admin.removeServers(servers)
+ end
end
end
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index 2eb872c3847..7444c3b76d5 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -464,5 +464,6 @@ Shell.load_command_group(
move_servers_tables_rsgroup
get_server_rsgroup
get_table_rsgroup
+ remove_servers_rsgroup
]
)
diff --git a/hbase-shell/src/main/ruby/shell/commands/remove_servers_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/remove_servers_rsgroup.rb
new file mode 100644
index 00000000000..ba8e60c8d2c
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/remove_servers_rsgroup.rb
@@ -0,0 +1,35 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+ module Commands
+ class RemoveServersRsgroup < Command
+ def help
+ <<-EOF
+Remove decommissioned servers from their rsgroup.
+Dead, recovering, or live servers are not allowed to be removed.
+Example:
+ hbase> remove_servers_rsgroup ['server1:port','server2:port']
+EOF
+ end
+
+ def command(servers)
+ rsgroup_admin.remove_servers(servers)
+ end
+ end
+ end
+end