From bdd7782f054a4740bb63f4d9781ffe083d51e4bf Mon Sep 17 00:00:00 2001 From: Stephen Yuan Jiang Date: Sat, 23 Jul 2016 14:47:11 -0700 Subject: [PATCH] HBASE-16008 A robust way deal with early termination of HBCK (Stephen Yuan Jiang) --- .../org/apache/hadoop/hbase/client/Admin.java | 7 + .../client/ConnectionImplementation.java | 7 + .../hadoop/hbase/client/HBaseAdmin.java | 15 + .../hbase/zookeeper/ZooKeeperWatcher.java | 5 + .../protobuf/generated/MasterProtos.java | 2028 ++++++++++++----- hbase-protocol/src/main/protobuf/Master.proto | 13 + .../hadoop/hbase/master/CatalogJanitor.java | 11 + .../apache/hadoop/hbase/master/HMaster.java | 44 +- .../hbase/master/MasterRpcServices.java | 9 + .../hadoop/hbase/master/MasterServices.java | 5 + .../apache/hadoop/hbase/util/HBaseFsck.java | 156 +- .../MasterMaintenanceModeTracker.java | 81 + .../hbase/zookeeper/SplitOrMergeTracker.java | 2 - .../hbase/master/MockNoopMasterServices.java | 5 + .../hadoop/hbase/util/TestHBaseFsckOneRS.java | 2 - 15 files changed, 1738 insertions(+), 652 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index df79dcfe410..06105171fc0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -958,6 +958,13 @@ public interface Admin extends Abortable, Closeable { */ void stopMaster() throws IOException; + /** + * Check whether Master is in maintenance mode + * + * @throws IOException if a remote or network exception occurs + */ + boolean isMasterInMaintenanceMode() throws IOException; + /** * Stop the designated regionserver * diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index 9b913c86fea..bb5c996deb0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -1415,6 +1415,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable { return stub.stopMaster(controller, request); } + @Override + public MasterProtos.IsInMaintenanceModeResponse isMasterInMaintenanceMode( + final RpcController controller, + final MasterProtos.IsInMaintenanceModeRequest request) throws ServiceException { + return stub.isMasterInMaintenanceMode(controller, request); + } + @Override public MasterProtos.BalanceResponse balance(RpcController controller, MasterProtos.BalanceRequest request) throws ServiceException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 074fe7f1bf1..29650efceda 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -130,6 +130,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterSta import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest; +import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; @@ -1932,6 +1934,19 @@ public class HBaseAdmin implements Admin { } } + @Override + public boolean isMasterInMaintenanceMode() throws IOException { + return executeCallable(new MasterCallable(getConnection()) { + @Override + public IsInMaintenanceModeResponse call(int callTimeout) throws ServiceException { + PayloadCarryingRpcController controller = rpcControllerFactory.newController(); + controller.setCallTimeout(callTimeout); + return master.isMasterInMaintenanceMode( + controller, IsInMaintenanceModeRequest.newBuilder().build()); + } + }).getInMaintenanceMode(); + } + @Override public ClusterStatus getClusterStatus() throws IOException { return executeCallable(new MasterCallable(getConnection()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java index 7cbfc98108d..5ef71712ca0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java @@ -123,6 +123,8 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { public String recoveringRegionsZNode; // znode containing namespace descriptors public static String namespaceZNode = "namespace"; + // znode of indicating master maintenance mode + public static String masterMaintZNode = "masterMaintenance"; public final static String META_ZNODE_PREFIX = "meta-region-server"; @@ -194,6 +196,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { ZKUtil.createAndFailSilent(this, backupMasterAddressesZNode); ZKUtil.createAndFailSilent(this, tableLockZNode); ZKUtil.createAndFailSilent(this, recoveringRegionsZNode); + ZKUtil.createAndFailSilent(this, masterMaintZNode); } catch (KeeperException e) { throw new ZooKeeperConnectionException( prefix("Unexpected KeeperException creating base node"), e); @@ -442,6 +445,8 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { conf.get("zookeeper.znode.recovering.regions", "recovering-regions")); namespaceZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.namespace", "namespace")); + masterMaintZNode = ZKUtil.joinZNode(baseZNode, + conf.get("zookeeper.znode.masterMaintenance", "master-maintenance")); } /** diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index 6daf889e7e0..c6477fa8353 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -26500,6 +26500,786 @@ public final class MasterProtos { // @@protoc_insertion_point(class_scope:hbase.pb.StopMasterResponse) } + public interface IsInMaintenanceModeRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * 
Protobuf type {@code hbase.pb.IsInMaintenanceModeRequest} + */ + public static final class IsInMaintenanceModeRequest extends + com.google.protobuf.GeneratedMessage + implements IsInMaintenanceModeRequestOrBuilder { + // Use IsInMaintenanceModeRequest.newBuilder() to construct. + private IsInMaintenanceModeRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private IsInMaintenanceModeRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final IsInMaintenanceModeRequest defaultInstance; + public static IsInMaintenanceModeRequest getDefaultInstance() { + return defaultInstance; + } + + public IsInMaintenanceModeRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private IsInMaintenanceModeRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public IsInMaintenanceModeRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new IsInMaintenanceModeRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + 
getSerializedSize(); + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest) obj; + + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public 
static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.IsInMaintenanceModeRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest(this); + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.IsInMaintenanceModeRequest) + } + + static { + defaultInstance = new IsInMaintenanceModeRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.IsInMaintenanceModeRequest) + } + + public interface IsInMaintenanceModeResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool inMaintenanceMode = 1; + /** + * required bool inMaintenanceMode = 1; + */ + boolean hasInMaintenanceMode(); + /** + * required bool inMaintenanceMode = 1; + */ + boolean getInMaintenanceMode(); + } + /** + * Protobuf type {@code hbase.pb.IsInMaintenanceModeResponse} + */ + public static final class IsInMaintenanceModeResponse extends + com.google.protobuf.GeneratedMessage + implements IsInMaintenanceModeResponseOrBuilder { + // Use IsInMaintenanceModeResponse.newBuilder() to construct. 
+ private IsInMaintenanceModeResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private IsInMaintenanceModeResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final IsInMaintenanceModeResponse defaultInstance; + public static IsInMaintenanceModeResponse getDefaultInstance() { + return defaultInstance; + } + + public IsInMaintenanceModeResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private IsInMaintenanceModeResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + inMaintenanceMode_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public IsInMaintenanceModeResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new IsInMaintenanceModeResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bool inMaintenanceMode = 1; + public static final int INMAINTENANCEMODE_FIELD_NUMBER = 1; + private boolean inMaintenanceMode_; + /** + * required bool inMaintenanceMode = 1; + */ + public boolean hasInMaintenanceMode() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool inMaintenanceMode = 1; + */ + public boolean getInMaintenanceMode() { + return inMaintenanceMode_; + } + + private void initFields() { + inMaintenanceMode_ = false; + } + 
private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasInMaintenanceMode()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, inMaintenanceMode_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, inMaintenanceMode_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse) obj; + + boolean result = true; + result = result && (hasInMaintenanceMode() == other.hasInMaintenanceMode()); + if (hasInMaintenanceMode()) { + result = result && (getInMaintenanceMode() + == other.getInMaintenanceMode()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasInMaintenanceMode()) { + hash = (37 * hash) + INMAINTENANCEMODE_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getInMaintenanceMode()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code hbase.pb.IsInMaintenanceModeResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder 
create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + inMaintenanceMode_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.inMaintenanceMode_ = inMaintenanceMode_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance()) return this; + if (other.hasInMaintenanceMode()) { + setInMaintenanceMode(other.getInMaintenanceMode()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasInMaintenanceMode()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bool inMaintenanceMode = 1; + private boolean inMaintenanceMode_ ; + /** + * required bool inMaintenanceMode = 1; + */ + public boolean hasInMaintenanceMode() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool inMaintenanceMode = 1; + */ + public boolean getInMaintenanceMode() { + return inMaintenanceMode_; + } + 
/** + * required bool inMaintenanceMode = 1; + */ + public Builder setInMaintenanceMode(boolean value) { + bitField0_ |= 0x00000001; + inMaintenanceMode_ = value; + onChanged(); + return this; + } + /** + * required bool inMaintenanceMode = 1; + */ + public Builder clearInMaintenanceMode() { + bitField0_ = (bitField0_ & ~0x00000001); + inMaintenanceMode_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:hbase.pb.IsInMaintenanceModeResponse) + } + + static { + defaultInstance = new IsInMaintenanceModeResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:hbase.pb.IsInMaintenanceModeResponse) + } + public interface BalanceRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -60043,6 +60823,19 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc IsMasterInMaintenanceMode(.hbase.pb.IsInMaintenanceModeRequest) returns (.hbase.pb.IsInMaintenanceModeResponse); + * + *
+       **
+       * Query whether the Master is in maintenance mode.
+       * </pre>
+ */ + public abstract void isMasterInMaintenanceMode( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request, + com.google.protobuf.RpcCallback done); + /** * rpc Balance(.hbase.pb.BalanceRequest) returns (.hbase.pb.BalanceResponse); * @@ -60656,6 +61449,14 @@ public final class MasterProtos { impl.stopMaster(controller, request, done); } + @java.lang.Override + public void isMasterInMaintenanceMode( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request, + com.google.protobuf.RpcCallback done) { + impl.isMasterInMaintenanceMode(controller, request, done); + } + @java.lang.Override public void balance( com.google.protobuf.RpcController controller, @@ -61001,74 +61802,76 @@ public final class MasterProtos { case 20: return impl.stopMaster(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest)request); case 21: - return impl.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)request); + return impl.isMasterInMaintenanceMode(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)request); case 22: - return impl.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request); + return impl.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)request); case 23: - return impl.isBalancerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request); + return impl.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request); case 24: - return impl.setSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest)request); + return impl.isBalancerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request); case 25: - return impl.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request); + return impl.setSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest)request); case 26: - return impl.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request); + return impl.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request); case 27: - return impl.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request); + return impl.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request); case 28: - return impl.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request); + return impl.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request); case 29: - return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request); + return impl.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request); case 30: - 
return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request); + return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request); case 31: - return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request); + return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request); case 32: - return impl.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request); + return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request); case 33: - return impl.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request); + return impl.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request); case 34: - return impl.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request); + return impl.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request); case 35: - return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request); + return impl.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request); case 36: - return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request); + return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request); case 37: - return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request); + return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request); case 38: - return impl.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request); + return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request); case 39: - return impl.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request); + return impl.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request); case 40: - return impl.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request); + return impl.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request); case 41: - return impl.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request); + return impl.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request); case 42: - return impl.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request); + return impl.modifyNamespace(controller, 
(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request); case 43: - return impl.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request); + return impl.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request); case 44: - return impl.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request); + return impl.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request); case 45: - return impl.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request); + return impl.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request); case 46: - return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request); + return impl.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request); case 47: - return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request); + return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request); case 48: - return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request); + return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request); case 49: - return impl.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request); + return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request); case 50: - return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request); + return impl.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request); case 51: - return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request); + return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request); case 52: - return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request); + return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request); case 53: - return impl.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request); + return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request); case 54: - return impl.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request); + return 
impl.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request); case 55: + return impl.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request); + case 56: return impl.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request); default: throw new java.lang.AssertionError("Can't get here."); @@ -61127,74 +61930,76 @@ public final class MasterProtos { case 20: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest.getDefaultInstance(); case 21: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance(); case 22: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance(); case 23: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance(); case 24: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance(); case 25: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance(); case 27: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance(); case 28: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance(); case 29: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance(); case 30: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); case 31: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); case 32: - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); case 33: - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); case 34: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); case 35: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); case 36: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); case 37: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); case 38: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); case 39: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 40: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); case 42: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); case 43: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); case 44: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); case 45: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); case 46: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); case 47: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); case 48: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); case 49: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); case 50: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); case 51: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); case 52: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); case 53: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); case 54: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); case 55: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); + case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -61253,74 +62058,76 @@ public final class MasterProtos { case 20: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance(); case 21: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance(); case 22: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(); case 23: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(); case 24: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(); case 25: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(); case 27: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(); + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(); case 28: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(); case 29: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(); case 30: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); case 31: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); case 32: - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); case 33: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); case 34: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); case 35: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); case 36: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); case 37: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); case 38: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); case 39: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 40: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); case 42: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); case 43: - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); case 44: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); case 45: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); case 46: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); case 47: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); case 48: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); case 49: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); case 50: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); case 51: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 52: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 53: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); case 54: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); case 55: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); + case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -61590,6 +62397,19 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc IsMasterInMaintenanceMode(.hbase.pb.IsInMaintenanceModeRequest) returns (.hbase.pb.IsInMaintenanceModeResponse); + * + *
+     * <pre>
+     **
+     * Query whether the Master is in maintenance mode.
+     * </pre>
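+     *
+     * A minimal usage sketch, not part of the protoc-generated output: it assumes
+     * a blocking MasterService stub, an RpcController instance named controller,
+     * and that the response exposes a getInMaintenanceMode() accessor.
+     *
+     *   // build an empty request and ask the active master whether it is in maintenance mode
+     *   IsInMaintenanceModeResponse resp = stub.isMasterInMaintenanceMode(
+     *       controller, IsInMaintenanceModeRequest.newBuilder().build());
+     *   boolean inMaintenance = resp.getInMaintenanceMode();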
+ */ + public abstract void isMasterInMaintenanceMode( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request, + com.google.protobuf.RpcCallback done); + /** * rpc Balance(.hbase.pb.BalanceRequest) returns (.hbase.pb.BalanceResponse); * @@ -62158,176 +62978,181 @@ public final class MasterProtos { done)); return; case 21: + this.isMasterInMaintenanceMode(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; + case 22: this.balance(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 22: + case 23: this.setBalancerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 23: + case 24: this.isBalancerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 24: + case 25: this.setSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 25: + case 26: this.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 26: + case 27: this.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 27: + case 28: this.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 28: + case 29: this.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 29: + case 30: this.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 30: + case 31: this.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 31: + case 32: this.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 32: + case 33: this.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 33: + case 34: this.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 34: + case 35: this.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request, 
com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 35: + case 36: this.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 36: + case 37: this.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 37: + case 38: this.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 38: + case 39: this.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 39: + case 40: this.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 40: + case 41: this.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 41: + case 42: this.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 42: + case 43: this.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 43: + case 44: this.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 44: + case 45: this.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 45: + case 46: this.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 46: + case 47: this.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 47: + case 48: this.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 48: + case 49: this.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 49: + case 50: this.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 50: + case 51: this.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 51: + case 52: 
this.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 52: + case 53: this.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 53: + case 54: this.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 54: + case 55: this.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 55: + case 56: this.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); @@ -62389,74 +63214,76 @@ public final class MasterProtos { case 20: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest.getDefaultInstance(); case 21: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest.getDefaultInstance(); case 22: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest.getDefaultInstance(); case 23: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest.getDefaultInstance(); case 24: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest.getDefaultInstance(); case 25: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance(); case 27: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance(); case 28: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance(); case 29: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance(); case 30: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); case 31: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); case 32: - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); case 33: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); case 34: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); case 35: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); case 36: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); case 37: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); case 38: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); case 39: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 40: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); case 42: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); case 43: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); case 44: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); case 45: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); case 46: - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); case 47: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); case 48: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); case 49: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); case 50: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); case 51: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); case 52: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); case 53: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); case 54: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); case 55: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); + case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -62515,74 +63342,76 @@ public final class MasterProtos { case 20: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance(); case 21: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance(); case 22: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(); case 23: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(); case 24: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance(); + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(); case 25: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(); case 27: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(); case 28: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(); case 29: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(); case 30: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); case 31: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); case 32: - return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); case 33: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); case 34: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); case 35: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); case 36: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); case 37: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); case 38: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); case 39: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 40: - return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 41: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); case 42: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); case 43: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); case 44: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); case 45: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); case 46: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); case 47: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); case 48: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); case 49: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); case 50: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); case 51: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 52: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 53: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); case 54: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); case 55: + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); + case 56: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -62920,12 +63749,27 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse.getDefaultInstance())); } + public void isMasterInMaintenanceMode( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(21), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance())); + } + public void balance( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(21), + getDescriptor().getMethods().get(22), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance(), @@ -62940,7 +63784,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(22), + getDescriptor().getMethods().get(23), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance(), @@ -62955,7 +63799,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(23), + getDescriptor().getMethods().get(24), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance(), @@ -62970,7 +63814,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(24), + getDescriptor().getMethods().get(25), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance(), @@ -62985,7 +63829,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(25), + getDescriptor().getMethods().get(26), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(), @@ -63000,7 +63844,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(26), + getDescriptor().getMethods().get(27), controller, request, 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(), @@ -63015,7 +63859,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(27), + getDescriptor().getMethods().get(28), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(), @@ -63030,7 +63874,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(28), + getDescriptor().getMethods().get(29), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(), @@ -63045,7 +63889,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(29), + getDescriptor().getMethods().get(30), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(), @@ -63060,7 +63904,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(30), + getDescriptor().getMethods().get(31), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(), @@ -63075,7 +63919,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(31), + getDescriptor().getMethods().get(32), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(), @@ -63090,7 +63934,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(32), + getDescriptor().getMethods().get(33), controller, request, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(), @@ -63105,7 +63949,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(33), + getDescriptor().getMethods().get(34), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(), @@ -63120,7 +63964,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(34), + getDescriptor().getMethods().get(35), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(), @@ -63135,7 +63979,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest 
request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(35), + getDescriptor().getMethods().get(36), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(), @@ -63150,7 +63994,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(36), + getDescriptor().getMethods().get(37), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(), @@ -63165,7 +64009,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(37), + getDescriptor().getMethods().get(38), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(), @@ -63180,7 +64024,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(38), + getDescriptor().getMethods().get(39), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(), @@ -63195,7 +64039,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(39), + getDescriptor().getMethods().get(40), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(), @@ -63210,7 +64054,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(40), + getDescriptor().getMethods().get(41), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(), @@ -63225,7 +64069,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(41), + getDescriptor().getMethods().get(42), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(), @@ -63240,7 +64084,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(42), + getDescriptor().getMethods().get(43), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(), @@ -63255,7 +64099,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(43), + getDescriptor().getMethods().get(44), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(), @@ -63270,7 +64114,7 
@@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(44), + getDescriptor().getMethods().get(45), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(), @@ -63285,7 +64129,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(45), + getDescriptor().getMethods().get(46), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(), @@ -63300,7 +64144,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(46), + getDescriptor().getMethods().get(47), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(), @@ -63315,7 +64159,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(47), + getDescriptor().getMethods().get(48), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(), @@ -63330,7 +64174,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(48), + getDescriptor().getMethods().get(49), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(), @@ -63345,7 +64189,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(49), + getDescriptor().getMethods().get(50), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(), @@ -63360,7 +64204,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(50), + getDescriptor().getMethods().get(51), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(), @@ -63375,7 +64219,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(51), + getDescriptor().getMethods().get(52), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(), @@ -63390,7 +64234,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request, com.google.protobuf.RpcCallback done) { 
channel.callMethod( - getDescriptor().getMethods().get(52), + getDescriptor().getMethods().get(53), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(), @@ -63405,7 +64249,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(53), + getDescriptor().getMethods().get(54), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(), @@ -63420,7 +64264,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(54), + getDescriptor().getMethods().get(55), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(), @@ -63435,7 +64279,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(55), + getDescriptor().getMethods().get(56), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(), @@ -63557,6 +64401,11 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest request) throws com.google.protobuf.ServiceException; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse isMasterInMaintenanceMode( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request) + throws com.google.protobuf.ServiceException; + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse balance( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request) @@ -63992,12 +64841,24 @@ public final class MasterProtos { } + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse isMasterInMaintenanceMode( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(21), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse.getDefaultInstance()); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse balance( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(21), + getDescriptor().getMethods().get(22), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse.getDefaultInstance()); @@ -64009,7 +64870,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest 
request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(22), + getDescriptor().getMethods().get(23), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse.getDefaultInstance()); @@ -64021,7 +64882,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(23), + getDescriptor().getMethods().get(24), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsBalancerEnabledResponse.getDefaultInstance()); @@ -64033,7 +64894,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(24), + getDescriptor().getMethods().get(25), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse.getDefaultInstance()); @@ -64045,7 +64906,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(25), + getDescriptor().getMethods().get(26), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance()); @@ -64057,7 +64918,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(26), + getDescriptor().getMethods().get(27), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance()); @@ -64069,7 +64930,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(27), + getDescriptor().getMethods().get(28), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance()); @@ -64081,7 +64942,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(28), + getDescriptor().getMethods().get(29), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance()); @@ -64093,7 +64954,7 @@ public final 
class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(29), + getDescriptor().getMethods().get(30), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance()); @@ -64105,7 +64966,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(30), + getDescriptor().getMethods().get(31), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance()); @@ -64117,7 +64978,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(31), + getDescriptor().getMethods().get(32), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance()); @@ -64129,7 +64990,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(32), + getDescriptor().getMethods().get(33), controller, request, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance()); @@ -64141,7 +65002,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(33), + getDescriptor().getMethods().get(34), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance()); @@ -64153,7 +65014,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(34), + getDescriptor().getMethods().get(35), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance()); @@ -64165,7 +65026,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(35), + getDescriptor().getMethods().get(36), controller, request, 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance()); @@ -64177,7 +65038,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(36), + getDescriptor().getMethods().get(37), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance()); @@ -64189,7 +65050,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(37), + getDescriptor().getMethods().get(38), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance()); @@ -64201,7 +65062,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(38), + getDescriptor().getMethods().get(39), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()); @@ -64213,7 +65074,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(39), + getDescriptor().getMethods().get(40), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()); @@ -64225,7 +65086,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(40), + getDescriptor().getMethods().get(41), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance()); @@ -64237,7 +65098,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(41), + getDescriptor().getMethods().get(42), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance()); @@ -64249,7 +65110,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(42), + getDescriptor().getMethods().get(43), 
controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance()); @@ -64261,7 +65122,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(43), + getDescriptor().getMethods().get(44), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance()); @@ -64273,7 +65134,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(44), + getDescriptor().getMethods().get(45), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance()); @@ -64285,7 +65146,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(45), + getDescriptor().getMethods().get(46), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance()); @@ -64297,7 +65158,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(46), + getDescriptor().getMethods().get(47), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance()); @@ -64309,7 +65170,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(47), + getDescriptor().getMethods().get(48), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance()); @@ -64321,7 +65182,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(48), + getDescriptor().getMethods().get(49), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()); @@ -64333,7 +65194,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request) throws com.google.protobuf.ServiceException { return 
(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(49), + getDescriptor().getMethods().get(50), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance()); @@ -64345,7 +65206,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(50), + getDescriptor().getMethods().get(51), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); @@ -64357,7 +65218,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(51), + getDescriptor().getMethods().get(52), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); @@ -64369,7 +65230,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(52), + getDescriptor().getMethods().get(53), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()); @@ -64381,7 +65242,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(53), + getDescriptor().getMethods().get(54), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance()); @@ -64393,7 +65254,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(54), + getDescriptor().getMethods().get(55), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance()); @@ -64405,7 +65266,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(55), + getDescriptor().getMethods().get(56), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()); @@ -64646,6 +65507,16 @@ public final class MasterProtos { private static 
com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_StopMasterResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_IsInMaintenanceModeRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_hbase_pb_IsInMaintenanceModeResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_BalanceRequest_descriptor; private static @@ -65056,245 +65927,250 @@ public final class MasterProtos { "tTableNamesByNamespaceResponse\022&\n\ttableN" + "ame\030\001 \003(\0132\023.hbase.pb.TableName\"\021\n\017Shutdo" + "wnRequest\"\022\n\020ShutdownResponse\"\023\n\021StopMas" + - "terRequest\"\024\n\022StopMasterResponse\"\037\n\016Bala" + - "nceRequest\022\r\n\005force\030\001 \001(\010\"\'\n\017BalanceResp" + - "onse\022\024\n\014balancer_ran\030\001 \002(\010\"<\n\031SetBalance" + - "rRunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchrono", - "us\030\002 \001(\010\"8\n\032SetBalancerRunningResponse\022\032" + - "\n\022prev_balance_value\030\001 \001(\010\"\032\n\030IsBalancer" + - "EnabledRequest\",\n\031IsBalancerEnabledRespo" + - "nse\022\017\n\007enabled\030\001 \002(\010\"w\n\035SetSplitOrMergeE" + - "nabledRequest\022\017\n\007enabled\030\001 \002(\010\022\023\n\013synchr" + - "onous\030\002 \001(\010\0220\n\014switch_types\030\003 \003(\0162\032.hbas" + - "e.pb.MasterSwitchType\"4\n\036SetSplitOrMerge" + - "EnabledResponse\022\022\n\nprev_value\030\001 \003(\010\"O\n\034I" + - "sSplitOrMergeEnabledRequest\022/\n\013switch_ty" + - "pe\030\001 \002(\0162\032.hbase.pb.MasterSwitchType\"0\n\035", - "IsSplitOrMergeEnabledResponse\022\017\n\007enabled" + - "\030\001 \002(\010\"\022\n\020NormalizeRequest\"+\n\021NormalizeR" + - "esponse\022\026\n\016normalizer_ran\030\001 \002(\010\")\n\033SetNo" + - "rmalizerRunningRequest\022\n\n\002on\030\001 \002(\010\"=\n\034Se" + - "tNormalizerRunningResponse\022\035\n\025prev_norma" + - "lizer_value\030\001 \001(\010\"\034\n\032IsNormalizerEnabled" + - "Request\".\n\033IsNormalizerEnabledResponse\022\017" + - "\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalogScanRequest" + - "\"-\n\026RunCatalogScanResponse\022\023\n\013scan_resul" + - "t\030\001 \001(\005\"-\n\033EnableCatalogJanitorRequest\022\016", - "\n\006enable\030\001 \002(\010\"2\n\034EnableCatalogJanitorRe" + - "sponse\022\022\n\nprev_value\030\001 \001(\010\" \n\036IsCatalogJ" + - "anitorEnabledRequest\"0\n\037IsCatalogJanitor" + - "EnabledResponse\022\r\n\005value\030\001 \002(\010\"B\n\017Snapsh" + - "otRequest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.S" + - "napshotDescription\",\n\020SnapshotResponse\022\030" + - "\n\020expected_timeout\030\001 \002(\003\"\036\n\034GetCompleted" + - "SnapshotsRequest\"Q\n\035GetCompletedSnapshot" + - "sResponse\0220\n\tsnapshots\030\001 \003(\0132\035.hbase.pb." 
+ - "SnapshotDescription\"H\n\025DeleteSnapshotReq", - "uest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.Snapsh" + - "otDescription\"\030\n\026DeleteSnapshotResponse\"" + - "s\n\026RestoreSnapshotRequest\022/\n\010snapshot\030\001 " + - "\002(\0132\035.hbase.pb.SnapshotDescription\022\026\n\013no" + - "nce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027" + - "RestoreSnapshotResponse\022\017\n\007proc_id\030\001 \002(\004" + - "\"H\n\025IsSnapshotDoneRequest\022/\n\010snapshot\030\001 " + - "\001(\0132\035.hbase.pb.SnapshotDescription\"^\n\026Is" + - "SnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:\005fals" + - "e\022/\n\010snapshot\030\002 \001(\0132\035.hbase.pb.SnapshotD", - "escription\"O\n\034IsRestoreSnapshotDoneReque" + - "st\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb.Snapshot" + - "Description\"4\n\035IsRestoreSnapshotDoneResp" + - "onse\022\023\n\004done\030\001 \001(\010:\005false\"F\n\033GetSchemaAl" + - "terStatusRequest\022\'\n\ntable_name\030\001 \002(\0132\023.h" + - "base.pb.TableName\"T\n\034GetSchemaAlterStatu" + - "sResponse\022\035\n\025yet_to_update_regions\030\001 \001(\r" + - "\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032GetTableDesc" + - "riptorsRequest\022(\n\013table_names\030\001 \003(\0132\023.hb" + - "ase.pb.TableName\022\r\n\005regex\030\002 \001(\t\022!\n\022inclu", - "de_sys_tables\030\003 \001(\010:\005false\022\021\n\tnamespace\030" + - "\004 \001(\t\"J\n\033GetTableDescriptorsResponse\022+\n\014" + - "table_schema\030\001 \003(\0132\025.hbase.pb.TableSchem" + - "a\"[\n\024GetTableNamesRequest\022\r\n\005regex\030\001 \001(\t" + - "\022!\n\022include_sys_tables\030\002 \001(\010:\005false\022\021\n\tn" + - "amespace\030\003 \001(\t\"A\n\025GetTableNamesResponse\022" + - "(\n\013table_names\030\001 \003(\0132\023.hbase.pb.TableNam" + - "e\"?\n\024GetTableStateRequest\022\'\n\ntable_name\030" + - "\001 \002(\0132\023.hbase.pb.TableName\"B\n\025GetTableSt" + - "ateResponse\022)\n\013table_state\030\001 \002(\0132\024.hbase", - ".pb.TableState\"\031\n\027GetClusterStatusReques" + - "t\"K\n\030GetClusterStatusResponse\022/\n\016cluster" + - "_status\030\001 \002(\0132\027.hbase.pb.ClusterStatus\"\030" + - "\n\026IsMasterRunningRequest\"4\n\027IsMasterRunn" + - "ingResponse\022\031\n\021is_master_running\030\001 \002(\010\"I" + - "\n\024ExecProcedureRequest\0221\n\tprocedure\030\001 \002(" + - "\0132\036.hbase.pb.ProcedureDescription\"F\n\025Exe" + - "cProcedureResponse\022\030\n\020expected_timeout\030\001" + - " \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n\026IsProcedure" + - "DoneRequest\0221\n\tprocedure\030\001 \001(\0132\036.hbase.p", - "b.ProcedureDescription\"`\n\027IsProcedureDon" + - "eResponse\022\023\n\004done\030\001 \001(\010:\005false\0220\n\010snapsh" + - "ot\030\002 \001(\0132\036.hbase.pb.ProcedureDescription" + - "\",\n\031GetProcedureResultRequest\022\017\n\007proc_id" + - "\030\001 \002(\004\"\371\001\n\032GetProcedureResultResponse\0229\n" + - "\005state\030\001 \002(\0162*.hbase.pb.GetProcedureResu" + - "ltResponse.State\022\022\n\nstart_time\030\002 \001(\004\022\023\n\013" + - "last_update\030\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224\n\texc" + - "eption\030\005 \001(\0132!.hbase.pb.ForeignException" + - "Message\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUNNI", - "NG\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortProcedureRequ" + - "est\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayInterruptIfRu" + - "nning\030\002 
\001(\010:\004true\"6\n\026AbortProcedureRespo" + - "nse\022\034\n\024is_procedure_aborted\030\001 \002(\010\"\027\n\025Lis" + - "tProceduresRequest\"@\n\026ListProceduresResp" + - "onse\022&\n\tprocedure\030\001 \003(\0132\023.hbase.pb.Proce" + - "dure\"\315\001\n\017SetQuotaRequest\022\021\n\tuser_name\030\001 " + - "\001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnamespace\030\003 \001" + - "(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase.pb.TableN" + - "ame\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass_global", - "s\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hbase.pb.Thr" + - "ottleRequest\"\022\n\020SetQuotaResponse\"J\n\037Majo" + - "rCompactionTimestampRequest\022\'\n\ntable_nam" + - "e\030\001 \002(\0132\023.hbase.pb.TableName\"U\n(MajorCom" + - "pactionTimestampForRegionRequest\022)\n\006regi" + - "on\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\"@\n M" + - "ajorCompactionTimestampResponse\022\034\n\024compa" + - "ction_timestamp\030\001 \002(\003\"\035\n\033SecurityCapabil" + - "itiesRequest\"\354\001\n\034SecurityCapabilitiesRes" + - "ponse\022G\n\014capabilities\030\001 \003(\01621.hbase.pb.S", - "ecurityCapabilitiesResponse.Capability\"\202" + - "\001\n\nCapability\022\031\n\025SIMPLE_AUTHENTICATION\020\000" + - "\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n\rAUTHORIZA" + - "TION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022\023\n\017CELL_V" + - "ISIBILITY\020\004*(\n\020MasterSwitchType\022\t\n\005SPLIT" + - "\020\000\022\t\n\005MERGE\020\0012\351\'\n\rMasterService\022e\n\024GetSc" + - "hemaAlterStatus\022%.hbase.pb.GetSchemaAlte" + - "rStatusRequest\032&.hbase.pb.GetSchemaAlter" + - "StatusResponse\022b\n\023GetTableDescriptors\022$." + - "hbase.pb.GetTableDescriptorsRequest\032%.hb", - "ase.pb.GetTableDescriptorsResponse\022P\n\rGe" + - "tTableNames\022\036.hbase.pb.GetTableNamesRequ" + - "est\032\037.hbase.pb.GetTableNamesResponse\022Y\n\020" + - "GetClusterStatus\022!.hbase.pb.GetClusterSt" + - "atusRequest\032\".hbase.pb.GetClusterStatusR" + - "esponse\022V\n\017IsMasterRunning\022 .hbase.pb.Is" + - "MasterRunningRequest\032!.hbase.pb.IsMaster" + - "RunningResponse\022D\n\tAddColumn\022\032.hbase.pb." 
+ - "AddColumnRequest\032\033.hbase.pb.AddColumnRes" + - "ponse\022M\n\014DeleteColumn\022\035.hbase.pb.DeleteC", - "olumnRequest\032\036.hbase.pb.DeleteColumnResp" + - "onse\022M\n\014ModifyColumn\022\035.hbase.pb.ModifyCo" + - "lumnRequest\032\036.hbase.pb.ModifyColumnRespo" + - "nse\022G\n\nMoveRegion\022\033.hbase.pb.MoveRegionR" + - "equest\032\034.hbase.pb.MoveRegionResponse\022k\n\026" + - "DispatchMergingRegions\022\'.hbase.pb.Dispat" + - "chMergingRegionsRequest\032(.hbase.pb.Dispa" + - "tchMergingRegionsResponse\022M\n\014AssignRegio" + - "n\022\035.hbase.pb.AssignRegionRequest\032\036.hbase" + - ".pb.AssignRegionResponse\022S\n\016UnassignRegi", - "on\022\037.hbase.pb.UnassignRegionRequest\032 .hb" + - "ase.pb.UnassignRegionResponse\022P\n\rOffline" + - "Region\022\036.hbase.pb.OfflineRegionRequest\032\037" + - ".hbase.pb.OfflineRegionResponse\022J\n\013Delet" + - "eTable\022\034.hbase.pb.DeleteTableRequest\032\035.h" + - "base.pb.DeleteTableResponse\022P\n\rtruncateT" + - "able\022\036.hbase.pb.TruncateTableRequest\032\037.h" + - "base.pb.TruncateTableResponse\022J\n\013EnableT" + - "able\022\034.hbase.pb.EnableTableRequest\032\035.hba" + - "se.pb.EnableTableResponse\022M\n\014DisableTabl", - "e\022\035.hbase.pb.DisableTableRequest\032\036.hbase" + - ".pb.DisableTableResponse\022J\n\013ModifyTable\022" + - "\034.hbase.pb.ModifyTableRequest\032\035.hbase.pb" + - ".ModifyTableResponse\022J\n\013CreateTable\022\034.hb" + - "ase.pb.CreateTableRequest\032\035.hbase.pb.Cre" + - "ateTableResponse\022A\n\010Shutdown\022\031.hbase.pb." + - "ShutdownRequest\032\032.hbase.pb.ShutdownRespo" + - "nse\022G\n\nStopMaster\022\033.hbase.pb.StopMasterR" + - "equest\032\034.hbase.pb.StopMasterResponse\022>\n\007" + - "Balance\022\030.hbase.pb.BalanceRequest\032\031.hbas", - "e.pb.BalanceResponse\022_\n\022SetBalancerRunni" + - "ng\022#.hbase.pb.SetBalancerRunningRequest\032" + - "$.hbase.pb.SetBalancerRunningResponse\022\\\n" + - "\021IsBalancerEnabled\022\".hbase.pb.IsBalancer" + - "EnabledRequest\032#.hbase.pb.IsBalancerEnab" + - "ledResponse\022k\n\026SetSplitOrMergeEnabled\022\'." + - "hbase.pb.SetSplitOrMergeEnabledRequest\032(" + - ".hbase.pb.SetSplitOrMergeEnabledResponse" + - "\022h\n\025IsSplitOrMergeEnabled\022&.hbase.pb.IsS" + - "plitOrMergeEnabledRequest\032\'.hbase.pb.IsS", - "plitOrMergeEnabledResponse\022D\n\tNormalize\022" + - "\032.hbase.pb.NormalizeRequest\032\033.hbase.pb.N" + - "ormalizeResponse\022e\n\024SetNormalizerRunning" + - "\022%.hbase.pb.SetNormalizerRunningRequest\032" + - "&.hbase.pb.SetNormalizerRunningResponse\022" + - "b\n\023IsNormalizerEnabled\022$.hbase.pb.IsNorm" + - "alizerEnabledRequest\032%.hbase.pb.IsNormal" + - "izerEnabledResponse\022S\n\016RunCatalogScan\022\037." 
+ - "hbase.pb.RunCatalogScanRequest\032 .hbase.p" + - "b.RunCatalogScanResponse\022e\n\024EnableCatalo", - "gJanitor\022%.hbase.pb.EnableCatalogJanitor" + - "Request\032&.hbase.pb.EnableCatalogJanitorR" + - "esponse\022n\n\027IsCatalogJanitorEnabled\022(.hba" + - "se.pb.IsCatalogJanitorEnabledRequest\032).h" + - "base.pb.IsCatalogJanitorEnabledResponse\022" + - "^\n\021ExecMasterService\022#.hbase.pb.Coproces" + - "sorServiceRequest\032$.hbase.pb.Coprocessor" + - "ServiceResponse\022A\n\010Snapshot\022\031.hbase.pb.S" + - "napshotRequest\032\032.hbase.pb.SnapshotRespon" + - "se\022h\n\025GetCompletedSnapshots\022&.hbase.pb.G", - "etCompletedSnapshotsRequest\032\'.hbase.pb.G" + - "etCompletedSnapshotsResponse\022S\n\016DeleteSn" + - "apshot\022\037.hbase.pb.DeleteSnapshotRequest\032" + - " .hbase.pb.DeleteSnapshotResponse\022S\n\016IsS" + - "napshotDone\022\037.hbase.pb.IsSnapshotDoneReq" + - "uest\032 .hbase.pb.IsSnapshotDoneResponse\022V" + - "\n\017RestoreSnapshot\022 .hbase.pb.RestoreSnap" + - "shotRequest\032!.hbase.pb.RestoreSnapshotRe" + - "sponse\022P\n\rExecProcedure\022\036.hbase.pb.ExecP" + - "rocedureRequest\032\037.hbase.pb.ExecProcedure", - "Response\022W\n\024ExecProcedureWithRet\022\036.hbase" + - ".pb.ExecProcedureRequest\032\037.hbase.pb.Exec" + - "ProcedureResponse\022V\n\017IsProcedureDone\022 .h" + - "base.pb.IsProcedureDoneRequest\032!.hbase.p" + - "b.IsProcedureDoneResponse\022V\n\017ModifyNames" + - "pace\022 .hbase.pb.ModifyNamespaceRequest\032!" + - ".hbase.pb.ModifyNamespaceResponse\022V\n\017Cre" + - "ateNamespace\022 .hbase.pb.CreateNamespaceR" + - "equest\032!.hbase.pb.CreateNamespaceRespons" + - "e\022V\n\017DeleteNamespace\022 .hbase.pb.DeleteNa", - "mespaceRequest\032!.hbase.pb.DeleteNamespac" + - "eResponse\022k\n\026GetNamespaceDescriptor\022\'.hb" + - "ase.pb.GetNamespaceDescriptorRequest\032(.h" + - "base.pb.GetNamespaceDescriptorResponse\022q" + - "\n\030ListNamespaceDescriptors\022).hbase.pb.Li" + - "stNamespaceDescriptorsRequest\032*.hbase.pb" + - ".ListNamespaceDescriptorsResponse\022\206\001\n\037Li" + - "stTableDescriptorsByNamespace\0220.hbase.pb" + - ".ListTableDescriptorsByNamespaceRequest\032" + - "1.hbase.pb.ListTableDescriptorsByNamespa", - "ceResponse\022t\n\031ListTableNamesByNamespace\022" + - "*.hbase.pb.ListTableNamesByNamespaceRequ" + - "est\032+.hbase.pb.ListTableNamesByNamespace" + - "Response\022P\n\rGetTableState\022\036.hbase.pb.Get" + - "TableStateRequest\032\037.hbase.pb.GetTableSta" + - "teResponse\022A\n\010SetQuota\022\031.hbase.pb.SetQuo" + - "taRequest\032\032.hbase.pb.SetQuotaResponse\022x\n" + - "\037getLastMajorCompactionTimestamp\022).hbase" + - ".pb.MajorCompactionTimestampRequest\032*.hb" + - "ase.pb.MajorCompactionTimestampResponse\022", - "\212\001\n(getLastMajorCompactionTimestampForRe" + - "gion\0222.hbase.pb.MajorCompactionTimestamp" + - "ForRegionRequest\032*.hbase.pb.MajorCompact" + - "ionTimestampResponse\022_\n\022getProcedureResu" + - "lt\022#.hbase.pb.GetProcedureResultRequest\032" + - "$.hbase.pb.GetProcedureResultResponse\022h\n" + - "\027getSecurityCapabilities\022%.hbase.pb.Secu" + - "rityCapabilitiesRequest\032&.hbase.pb.Secur" + - "ityCapabilitiesResponse\022S\n\016AbortProcedur" + - "e\022\037.hbase.pb.AbortProcedureRequest\032 .hba", - "se.pb.AbortProcedureResponse\022S\n\016ListProc" + - "edures\022\037.hbase.pb.ListProceduresRequest\032" + - " .hbase.pb.ListProceduresResponseBB\n*org" + - ".apache.hadoop.hbase.protobuf.generatedB" + - "\014MasterProtosH\001\210\001\001\240\001\001" + 
"terRequest\"\024\n\022StopMasterResponse\"\034\n\032IsIn" + + "MaintenanceModeRequest\"8\n\033IsInMaintenanc" + + "eModeResponse\022\031\n\021inMaintenanceMode\030\001 \002(\010" + + "\"\037\n\016BalanceRequest\022\r\n\005force\030\001 \001(\010\"\'\n\017Bal", + "anceResponse\022\024\n\014balancer_ran\030\001 \002(\010\"<\n\031Se" + + "tBalancerRunningRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013s" + + "ynchronous\030\002 \001(\010\"8\n\032SetBalancerRunningRe" + + "sponse\022\032\n\022prev_balance_value\030\001 \001(\010\"\032\n\030Is" + + "BalancerEnabledRequest\",\n\031IsBalancerEnab" + + "ledResponse\022\017\n\007enabled\030\001 \002(\010\"w\n\035SetSplit" + + "OrMergeEnabledRequest\022\017\n\007enabled\030\001 \002(\010\022\023" + + "\n\013synchronous\030\002 \001(\010\0220\n\014switch_types\030\003 \003(" + + "\0162\032.hbase.pb.MasterSwitchType\"4\n\036SetSpli" + + "tOrMergeEnabledResponse\022\022\n\nprev_value\030\001 ", + "\003(\010\"O\n\034IsSplitOrMergeEnabledRequest\022/\n\013s" + + "witch_type\030\001 \002(\0162\032.hbase.pb.MasterSwitch" + + "Type\"0\n\035IsSplitOrMergeEnabledResponse\022\017\n" + + "\007enabled\030\001 \002(\010\"\022\n\020NormalizeRequest\"+\n\021No" + + "rmalizeResponse\022\026\n\016normalizer_ran\030\001 \002(\010\"" + + ")\n\033SetNormalizerRunningRequest\022\n\n\002on\030\001 \002" + + "(\010\"=\n\034SetNormalizerRunningResponse\022\035\n\025pr" + + "ev_normalizer_value\030\001 \001(\010\"\034\n\032IsNormalize" + + "rEnabledRequest\".\n\033IsNormalizerEnabledRe" + + "sponse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalogSca", + "nRequest\"-\n\026RunCatalogScanResponse\022\023\n\013sc" + + "an_result\030\001 \001(\005\"-\n\033EnableCatalogJanitorR" + + "equest\022\016\n\006enable\030\001 \002(\010\"2\n\034EnableCatalogJ" + + "anitorResponse\022\022\n\nprev_value\030\001 \001(\010\" \n\036Is" + + "CatalogJanitorEnabledRequest\"0\n\037IsCatalo" + + "gJanitorEnabledResponse\022\r\n\005value\030\001 \002(\010\"B" + + "\n\017SnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hb" + + "ase.pb.SnapshotDescription\",\n\020SnapshotRe" + + "sponse\022\030\n\020expected_timeout\030\001 \002(\003\"\036\n\034GetC" + + "ompletedSnapshotsRequest\"Q\n\035GetCompleted", + "SnapshotsResponse\0220\n\tsnapshots\030\001 \003(\0132\035.h" + + "base.pb.SnapshotDescription\"H\n\025DeleteSna" + + "pshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.p" + + "b.SnapshotDescription\"\030\n\026DeleteSnapshotR" + + "esponse\"s\n\026RestoreSnapshotRequest\022/\n\010sna" + + "pshot\030\001 \002(\0132\035.hbase.pb.SnapshotDescripti" + + "on\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(" + + "\004:\0010\"*\n\027RestoreSnapshotResponse\022\017\n\007proc_" + + "id\030\001 \002(\004\"H\n\025IsSnapshotDoneRequest\022/\n\010sna" + + "pshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescripti", + "on\"^\n\026IsSnapshotDoneResponse\022\023\n\004done\030\001 \001" + + "(\010:\005false\022/\n\010snapshot\030\002 \001(\0132\035.hbase.pb.S" + + "napshotDescription\"O\n\034IsRestoreSnapshotD" + + "oneRequest\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb." 
+ + "SnapshotDescription\"4\n\035IsRestoreSnapshot" + + "DoneResponse\022\023\n\004done\030\001 \001(\010:\005false\"F\n\033Get" + + "SchemaAlterStatusRequest\022\'\n\ntable_name\030\001" + + " \002(\0132\023.hbase.pb.TableName\"T\n\034GetSchemaAl" + + "terStatusResponse\022\035\n\025yet_to_update_regio" + + "ns\030\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032GetT", + "ableDescriptorsRequest\022(\n\013table_names\030\001 " + + "\003(\0132\023.hbase.pb.TableName\022\r\n\005regex\030\002 \001(\t\022" + + "!\n\022include_sys_tables\030\003 \001(\010:\005false\022\021\n\tna" + + "mespace\030\004 \001(\t\"J\n\033GetTableDescriptorsResp" + + "onse\022+\n\014table_schema\030\001 \003(\0132\025.hbase.pb.Ta" + + "bleSchema\"[\n\024GetTableNamesRequest\022\r\n\005reg" + + "ex\030\001 \001(\t\022!\n\022include_sys_tables\030\002 \001(\010:\005fa" + + "lse\022\021\n\tnamespace\030\003 \001(\t\"A\n\025GetTableNamesR" + + "esponse\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb." + + "TableName\"?\n\024GetTableStateRequest\022\'\n\ntab", + "le_name\030\001 \002(\0132\023.hbase.pb.TableName\"B\n\025Ge" + + "tTableStateResponse\022)\n\013table_state\030\001 \002(\013" + + "2\024.hbase.pb.TableState\"\031\n\027GetClusterStat" + + "usRequest\"K\n\030GetClusterStatusResponse\022/\n" + + "\016cluster_status\030\001 \002(\0132\027.hbase.pb.Cluster" + + "Status\"\030\n\026IsMasterRunningRequest\"4\n\027IsMa" + + "sterRunningResponse\022\031\n\021is_master_running" + + "\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221\n\tproced" + + "ure\030\001 \002(\0132\036.hbase.pb.ProcedureDescriptio" + + "n\"F\n\025ExecProcedureResponse\022\030\n\020expected_t", + "imeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n\026IsP" + + "rocedureDoneRequest\0221\n\tprocedure\030\001 \001(\0132\036" + + ".hbase.pb.ProcedureDescription\"`\n\027IsProc" + + "edureDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\0220" + + "\n\010snapshot\030\002 \001(\0132\036.hbase.pb.ProcedureDes" + + "cription\",\n\031GetProcedureResultRequest\022\017\n" + + "\007proc_id\030\001 \002(\004\"\371\001\n\032GetProcedureResultRes" + + "ponse\0229\n\005state\030\001 \002(\0162*.hbase.pb.GetProce" + + "dureResultResponse.State\022\022\n\nstart_time\030\002" + + " \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030\004 \001(", + "\014\0224\n\texception\030\005 \001(\0132!.hbase.pb.ForeignE" + + "xceptionMessage\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022" + + "\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortProce" + + "dureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayInter" + + "ruptIfRunning\030\002 \001(\010:\004true\"6\n\026AbortProced" + + "ureResponse\022\034\n\024is_procedure_aborted\030\001 \002(" + + "\010\"\027\n\025ListProceduresRequest\"@\n\026ListProced" + + "uresResponse\022&\n\tprocedure\030\001 \003(\0132\023.hbase." 
+ + "pb.Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\tuser" + + "_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnames", + "pace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase.p" + + "b.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypas" + + "s_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hbas" + + "e.pb.ThrottleRequest\"\022\n\020SetQuotaResponse" + + "\"J\n\037MajorCompactionTimestampRequest\022\'\n\nt" + + "able_name\030\001 \002(\0132\023.hbase.pb.TableName\"U\n(" + + "MajorCompactionTimestampForRegionRequest" + + "\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecif" + + "ier\"@\n MajorCompactionTimestampResponse\022" + + "\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Securit", + "yCapabilitiesRequest\"\354\001\n\034SecurityCapabil" + + "itiesResponse\022G\n\014capabilities\030\001 \003(\01621.hb" + + "ase.pb.SecurityCapabilitiesResponse.Capa" + + "bility\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHENTI" + + "CATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n\rA" + + "UTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022\023" + + "\n\017CELL_VISIBILITY\020\004*(\n\020MasterSwitchType\022" + + "\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\323(\n\rMasterService\022" + + "e\n\024GetSchemaAlterStatus\022%.hbase.pb.GetSc" + + "hemaAlterStatusRequest\032&.hbase.pb.GetSch", + "emaAlterStatusResponse\022b\n\023GetTableDescri" + + "ptors\022$.hbase.pb.GetTableDescriptorsRequ" + + "est\032%.hbase.pb.GetTableDescriptorsRespon" + + "se\022P\n\rGetTableNames\022\036.hbase.pb.GetTableN" + + "amesRequest\032\037.hbase.pb.GetTableNamesResp" + + "onse\022Y\n\020GetClusterStatus\022!.hbase.pb.GetC" + + "lusterStatusRequest\032\".hbase.pb.GetCluste" + + "rStatusResponse\022V\n\017IsMasterRunning\022 .hba" + + "se.pb.IsMasterRunningRequest\032!.hbase.pb." + + "IsMasterRunningResponse\022D\n\tAddColumn\022\032.h", + "base.pb.AddColumnRequest\032\033.hbase.pb.AddC" + + "olumnResponse\022M\n\014DeleteColumn\022\035.hbase.pb" + + ".DeleteColumnRequest\032\036.hbase.pb.DeleteCo" + + "lumnResponse\022M\n\014ModifyColumn\022\035.hbase.pb." + + "ModifyColumnRequest\032\036.hbase.pb.ModifyCol" + + "umnResponse\022G\n\nMoveRegion\022\033.hbase.pb.Mov" + + "eRegionRequest\032\034.hbase.pb.MoveRegionResp" + + "onse\022k\n\026DispatchMergingRegions\022\'.hbase.p" + + "b.DispatchMergingRegionsRequest\032(.hbase." + + "pb.DispatchMergingRegionsResponse\022M\n\014Ass", + "ignRegion\022\035.hbase.pb.AssignRegionRequest" + + "\032\036.hbase.pb.AssignRegionResponse\022S\n\016Unas" + + "signRegion\022\037.hbase.pb.UnassignRegionRequ" + + "est\032 .hbase.pb.UnassignRegionResponse\022P\n" + + "\rOfflineRegion\022\036.hbase.pb.OfflineRegionR" + + "equest\032\037.hbase.pb.OfflineRegionResponse\022" + + "J\n\013DeleteTable\022\034.hbase.pb.DeleteTableReq" + + "uest\032\035.hbase.pb.DeleteTableResponse\022P\n\rt" + + "runcateTable\022\036.hbase.pb.TruncateTableReq" + + "uest\032\037.hbase.pb.TruncateTableResponse\022J\n", + "\013EnableTable\022\034.hbase.pb.EnableTableReque" + + "st\032\035.hbase.pb.EnableTableResponse\022M\n\014Dis" + + "ableTable\022\035.hbase.pb.DisableTableRequest" + + "\032\036.hbase.pb.DisableTableResponse\022J\n\013Modi" + + "fyTable\022\034.hbase.pb.ModifyTableRequest\032\035." 
+ + "hbase.pb.ModifyTableResponse\022J\n\013CreateTa" + + "ble\022\034.hbase.pb.CreateTableRequest\032\035.hbas" + + "e.pb.CreateTableResponse\022A\n\010Shutdown\022\031.h" + + "base.pb.ShutdownRequest\032\032.hbase.pb.Shutd" + + "ownResponse\022G\n\nStopMaster\022\033.hbase.pb.Sto", + "pMasterRequest\032\034.hbase.pb.StopMasterResp" + + "onse\022h\n\031IsMasterInMaintenanceMode\022$.hbas" + + "e.pb.IsInMaintenanceModeRequest\032%.hbase." + + "pb.IsInMaintenanceModeResponse\022>\n\007Balanc" + + "e\022\030.hbase.pb.BalanceRequest\032\031.hbase.pb.B" + + "alanceResponse\022_\n\022SetBalancerRunning\022#.h" + + "base.pb.SetBalancerRunningRequest\032$.hbas" + + "e.pb.SetBalancerRunningResponse\022\\\n\021IsBal" + + "ancerEnabled\022\".hbase.pb.IsBalancerEnable" + + "dRequest\032#.hbase.pb.IsBalancerEnabledRes", + "ponse\022k\n\026SetSplitOrMergeEnabled\022\'.hbase." + + "pb.SetSplitOrMergeEnabledRequest\032(.hbase" + + ".pb.SetSplitOrMergeEnabledResponse\022h\n\025Is" + + "SplitOrMergeEnabled\022&.hbase.pb.IsSplitOr" + + "MergeEnabledRequest\032\'.hbase.pb.IsSplitOr" + + "MergeEnabledResponse\022D\n\tNormalize\022\032.hbas" + + "e.pb.NormalizeRequest\032\033.hbase.pb.Normali" + + "zeResponse\022e\n\024SetNormalizerRunning\022%.hba" + + "se.pb.SetNormalizerRunningRequest\032&.hbas" + + "e.pb.SetNormalizerRunningResponse\022b\n\023IsN", + "ormalizerEnabled\022$.hbase.pb.IsNormalizer" + + "EnabledRequest\032%.hbase.pb.IsNormalizerEn" + + "abledResponse\022S\n\016RunCatalogScan\022\037.hbase." + + "pb.RunCatalogScanRequest\032 .hbase.pb.RunC" + + "atalogScanResponse\022e\n\024EnableCatalogJanit" + + "or\022%.hbase.pb.EnableCatalogJanitorReques" + + "t\032&.hbase.pb.EnableCatalogJanitorRespons" + + "e\022n\n\027IsCatalogJanitorEnabled\022(.hbase.pb." + + "IsCatalogJanitorEnabledRequest\032).hbase.p" + + "b.IsCatalogJanitorEnabledResponse\022^\n\021Exe", + "cMasterService\022#.hbase.pb.CoprocessorSer" + + "viceRequest\032$.hbase.pb.CoprocessorServic" + + "eResponse\022A\n\010Snapshot\022\031.hbase.pb.Snapsho" + + "tRequest\032\032.hbase.pb.SnapshotResponse\022h\n\025" + + "GetCompletedSnapshots\022&.hbase.pb.GetComp" + + "letedSnapshotsRequest\032\'.hbase.pb.GetComp" + + "letedSnapshotsResponse\022S\n\016DeleteSnapshot" + + "\022\037.hbase.pb.DeleteSnapshotRequest\032 .hbas" + + "e.pb.DeleteSnapshotResponse\022S\n\016IsSnapsho" + + "tDone\022\037.hbase.pb.IsSnapshotDoneRequest\032 ", + ".hbase.pb.IsSnapshotDoneResponse\022V\n\017Rest" + + "oreSnapshot\022 .hbase.pb.RestoreSnapshotRe" + + "quest\032!.hbase.pb.RestoreSnapshotResponse" + + "\022P\n\rExecProcedure\022\036.hbase.pb.ExecProcedu" + + "reRequest\032\037.hbase.pb.ExecProcedureRespon" + + "se\022W\n\024ExecProcedureWithRet\022\036.hbase.pb.Ex" + + "ecProcedureRequest\032\037.hbase.pb.ExecProced" + + "ureResponse\022V\n\017IsProcedureDone\022 .hbase.p" + + "b.IsProcedureDoneRequest\032!.hbase.pb.IsPr" + + "ocedureDoneResponse\022V\n\017ModifyNamespace\022 ", + ".hbase.pb.ModifyNamespaceRequest\032!.hbase" + + ".pb.ModifyNamespaceResponse\022V\n\017CreateNam" + + "espace\022 .hbase.pb.CreateNamespaceRequest" + + "\032!.hbase.pb.CreateNamespaceResponse\022V\n\017D" + + "eleteNamespace\022 .hbase.pb.DeleteNamespac" + + "eRequest\032!.hbase.pb.DeleteNamespaceRespo" + + "nse\022k\n\026GetNamespaceDescriptor\022\'.hbase.pb" + + ".GetNamespaceDescriptorRequest\032(.hbase.p" + + "b.GetNamespaceDescriptorResponse\022q\n\030List" + + "NamespaceDescriptors\022).hbase.pb.ListName", + "spaceDescriptorsRequest\032*.hbase.pb.ListN" + + 
"amespaceDescriptorsResponse\022\206\001\n\037ListTabl" + + "eDescriptorsByNamespace\0220.hbase.pb.ListT" + + "ableDescriptorsByNamespaceRequest\0321.hbas" + + "e.pb.ListTableDescriptorsByNamespaceResp" + + "onse\022t\n\031ListTableNamesByNamespace\022*.hbas" + + "e.pb.ListTableNamesByNamespaceRequest\032+." + + "hbase.pb.ListTableNamesByNamespaceRespon" + + "se\022P\n\rGetTableState\022\036.hbase.pb.GetTableS" + + "tateRequest\032\037.hbase.pb.GetTableStateResp", + "onse\022A\n\010SetQuota\022\031.hbase.pb.SetQuotaRequ" + + "est\032\032.hbase.pb.SetQuotaResponse\022x\n\037getLa" + + "stMajorCompactionTimestamp\022).hbase.pb.Ma" + + "jorCompactionTimestampRequest\032*.hbase.pb" + + ".MajorCompactionTimestampResponse\022\212\001\n(ge" + + "tLastMajorCompactionTimestampForRegion\0222" + + ".hbase.pb.MajorCompactionTimestampForReg" + + "ionRequest\032*.hbase.pb.MajorCompactionTim" + + "estampResponse\022_\n\022getProcedureResult\022#.h" + + "base.pb.GetProcedureResultRequest\032$.hbas", + "e.pb.GetProcedureResultResponse\022h\n\027getSe" + + "curityCapabilities\022%.hbase.pb.SecurityCa" + + "pabilitiesRequest\032&.hbase.pb.SecurityCap" + + "abilitiesResponse\022S\n\016AbortProcedure\022\037.hb" + + "ase.pb.AbortProcedureRequest\032 .hbase.pb." + + "AbortProcedureResponse\022S\n\016ListProcedures" + + "\022\037.hbase.pb.ListProceduresRequest\032 .hbas" + + "e.pb.ListProceduresResponseBB\n*org.apach" + + "e.hadoop.hbase.protobuf.generatedB\014Maste" + + "rProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -65577,380 +66453,392 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_StopMasterResponse_descriptor, new java.lang.String[] { }); - internal_static_hbase_pb_BalanceRequest_descriptor = + internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor = getDescriptor().getMessageTypes().get(46); + internal_static_hbase_pb_IsInMaintenanceModeRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_IsInMaintenanceModeRequest_descriptor, + new java.lang.String[] { }); + internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor = + getDescriptor().getMessageTypes().get(47); + internal_static_hbase_pb_IsInMaintenanceModeResponse_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_hbase_pb_IsInMaintenanceModeResponse_descriptor, + new java.lang.String[] { "InMaintenanceMode", }); + internal_static_hbase_pb_BalanceRequest_descriptor = + getDescriptor().getMessageTypes().get(48); internal_static_hbase_pb_BalanceRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BalanceRequest_descriptor, new java.lang.String[] { "Force", }); internal_static_hbase_pb_BalanceResponse_descriptor = - getDescriptor().getMessageTypes().get(47); + getDescriptor().getMessageTypes().get(49); internal_static_hbase_pb_BalanceResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_BalanceResponse_descriptor, new java.lang.String[] { "BalancerRan", }); internal_static_hbase_pb_SetBalancerRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(48); + getDescriptor().getMessageTypes().get(50); 
internal_static_hbase_pb_SetBalancerRunningRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetBalancerRunningRequest_descriptor, new java.lang.String[] { "On", "Synchronous", }); internal_static_hbase_pb_SetBalancerRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(49); + getDescriptor().getMessageTypes().get(51); internal_static_hbase_pb_SetBalancerRunningResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetBalancerRunningResponse_descriptor, new java.lang.String[] { "PrevBalanceValue", }); internal_static_hbase_pb_IsBalancerEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(50); + getDescriptor().getMessageTypes().get(52); internal_static_hbase_pb_IsBalancerEnabledRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsBalancerEnabledRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsBalancerEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(51); + getDescriptor().getMessageTypes().get(53); internal_static_hbase_pb_IsBalancerEnabledResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsBalancerEnabledResponse_descriptor, new java.lang.String[] { "Enabled", }); internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(52); + getDescriptor().getMessageTypes().get(54); internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_descriptor, new java.lang.String[] { "Enabled", "Synchronous", "SwitchTypes", }); internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(53); + getDescriptor().getMessageTypes().get(55); internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_descriptor, new java.lang.String[] { "PrevValue", }); internal_static_hbase_pb_IsSplitOrMergeEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(54); + getDescriptor().getMessageTypes().get(56); internal_static_hbase_pb_IsSplitOrMergeEnabledRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsSplitOrMergeEnabledRequest_descriptor, new java.lang.String[] { "SwitchType", }); internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(55); + getDescriptor().getMessageTypes().get(57); internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_descriptor, new java.lang.String[] { "Enabled", }); internal_static_hbase_pb_NormalizeRequest_descriptor = - getDescriptor().getMessageTypes().get(56); + getDescriptor().getMessageTypes().get(58); internal_static_hbase_pb_NormalizeRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_NormalizeRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_NormalizeResponse_descriptor = - getDescriptor().getMessageTypes().get(57); + getDescriptor().getMessageTypes().get(59); 
internal_static_hbase_pb_NormalizeResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_NormalizeResponse_descriptor, new java.lang.String[] { "NormalizerRan", }); internal_static_hbase_pb_SetNormalizerRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(58); + getDescriptor().getMessageTypes().get(60); internal_static_hbase_pb_SetNormalizerRunningRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetNormalizerRunningRequest_descriptor, new java.lang.String[] { "On", }); internal_static_hbase_pb_SetNormalizerRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(59); + getDescriptor().getMessageTypes().get(61); internal_static_hbase_pb_SetNormalizerRunningResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetNormalizerRunningResponse_descriptor, new java.lang.String[] { "PrevNormalizerValue", }); internal_static_hbase_pb_IsNormalizerEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(60); + getDescriptor().getMessageTypes().get(62); internal_static_hbase_pb_IsNormalizerEnabledRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsNormalizerEnabledRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsNormalizerEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(61); + getDescriptor().getMessageTypes().get(63); internal_static_hbase_pb_IsNormalizerEnabledResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsNormalizerEnabledResponse_descriptor, new java.lang.String[] { "Enabled", }); internal_static_hbase_pb_RunCatalogScanRequest_descriptor = - getDescriptor().getMessageTypes().get(62); + getDescriptor().getMessageTypes().get(64); internal_static_hbase_pb_RunCatalogScanRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RunCatalogScanRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_RunCatalogScanResponse_descriptor = - getDescriptor().getMessageTypes().get(63); + getDescriptor().getMessageTypes().get(65); internal_static_hbase_pb_RunCatalogScanResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RunCatalogScanResponse_descriptor, new java.lang.String[] { "ScanResult", }); internal_static_hbase_pb_EnableCatalogJanitorRequest_descriptor = - getDescriptor().getMessageTypes().get(64); + getDescriptor().getMessageTypes().get(66); internal_static_hbase_pb_EnableCatalogJanitorRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_EnableCatalogJanitorRequest_descriptor, new java.lang.String[] { "Enable", }); internal_static_hbase_pb_EnableCatalogJanitorResponse_descriptor = - getDescriptor().getMessageTypes().get(65); + getDescriptor().getMessageTypes().get(67); internal_static_hbase_pb_EnableCatalogJanitorResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_EnableCatalogJanitorResponse_descriptor, new java.lang.String[] { "PrevValue", }); internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(66); + getDescriptor().getMessageTypes().get(68); 
internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(67); + getDescriptor().getMessageTypes().get(69); internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_descriptor, new java.lang.String[] { "Value", }); internal_static_hbase_pb_SnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(68); + getDescriptor().getMessageTypes().get(70); internal_static_hbase_pb_SnapshotRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SnapshotRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_hbase_pb_SnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(69); + getDescriptor().getMessageTypes().get(71); internal_static_hbase_pb_SnapshotResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SnapshotResponse_descriptor, new java.lang.String[] { "ExpectedTimeout", }); internal_static_hbase_pb_GetCompletedSnapshotsRequest_descriptor = - getDescriptor().getMessageTypes().get(70); + getDescriptor().getMessageTypes().get(72); internal_static_hbase_pb_GetCompletedSnapshotsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetCompletedSnapshotsRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetCompletedSnapshotsResponse_descriptor = - getDescriptor().getMessageTypes().get(71); + getDescriptor().getMessageTypes().get(73); internal_static_hbase_pb_GetCompletedSnapshotsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetCompletedSnapshotsResponse_descriptor, new java.lang.String[] { "Snapshots", }); internal_static_hbase_pb_DeleteSnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(72); + getDescriptor().getMessageTypes().get(74); internal_static_hbase_pb_DeleteSnapshotRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_DeleteSnapshotRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_hbase_pb_DeleteSnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(73); + getDescriptor().getMessageTypes().get(75); internal_static_hbase_pb_DeleteSnapshotResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_DeleteSnapshotResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_RestoreSnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(74); + getDescriptor().getMessageTypes().get(76); internal_static_hbase_pb_RestoreSnapshotRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RestoreSnapshotRequest_descriptor, new java.lang.String[] { "Snapshot", "NonceGroup", "Nonce", }); internal_static_hbase_pb_RestoreSnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(75); + getDescriptor().getMessageTypes().get(77); internal_static_hbase_pb_RestoreSnapshotResponse_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RestoreSnapshotResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_IsSnapshotDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(76); + getDescriptor().getMessageTypes().get(78); internal_static_hbase_pb_IsSnapshotDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsSnapshotDoneRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_hbase_pb_IsSnapshotDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(77); + getDescriptor().getMessageTypes().get(79); internal_static_hbase_pb_IsSnapshotDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsSnapshotDoneResponse_descriptor, new java.lang.String[] { "Done", "Snapshot", }); internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(78); + getDescriptor().getMessageTypes().get(80); internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(79); + getDescriptor().getMessageTypes().get(81); internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_descriptor, new java.lang.String[] { "Done", }); internal_static_hbase_pb_GetSchemaAlterStatusRequest_descriptor = - getDescriptor().getMessageTypes().get(80); + getDescriptor().getMessageTypes().get(82); internal_static_hbase_pb_GetSchemaAlterStatusRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetSchemaAlterStatusRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_GetSchemaAlterStatusResponse_descriptor = - getDescriptor().getMessageTypes().get(81); + getDescriptor().getMessageTypes().get(83); internal_static_hbase_pb_GetSchemaAlterStatusResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetSchemaAlterStatusResponse_descriptor, new java.lang.String[] { "YetToUpdateRegions", "TotalRegions", }); internal_static_hbase_pb_GetTableDescriptorsRequest_descriptor = - getDescriptor().getMessageTypes().get(82); + getDescriptor().getMessageTypes().get(84); internal_static_hbase_pb_GetTableDescriptorsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableDescriptorsRequest_descriptor, new java.lang.String[] { "TableNames", "Regex", "IncludeSysTables", "Namespace", }); internal_static_hbase_pb_GetTableDescriptorsResponse_descriptor = - getDescriptor().getMessageTypes().get(83); + getDescriptor().getMessageTypes().get(85); internal_static_hbase_pb_GetTableDescriptorsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableDescriptorsResponse_descriptor, new java.lang.String[] { "TableSchema", }); internal_static_hbase_pb_GetTableNamesRequest_descriptor = - getDescriptor().getMessageTypes().get(84); + getDescriptor().getMessageTypes().get(86); 
internal_static_hbase_pb_GetTableNamesRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableNamesRequest_descriptor, new java.lang.String[] { "Regex", "IncludeSysTables", "Namespace", }); internal_static_hbase_pb_GetTableNamesResponse_descriptor = - getDescriptor().getMessageTypes().get(85); + getDescriptor().getMessageTypes().get(87); internal_static_hbase_pb_GetTableNamesResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableNamesResponse_descriptor, new java.lang.String[] { "TableNames", }); internal_static_hbase_pb_GetTableStateRequest_descriptor = - getDescriptor().getMessageTypes().get(86); + getDescriptor().getMessageTypes().get(88); internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableStateRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_GetTableStateResponse_descriptor = - getDescriptor().getMessageTypes().get(87); + getDescriptor().getMessageTypes().get(89); internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableStateResponse_descriptor, new java.lang.String[] { "TableState", }); internal_static_hbase_pb_GetClusterStatusRequest_descriptor = - getDescriptor().getMessageTypes().get(88); + getDescriptor().getMessageTypes().get(90); internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetClusterStatusRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetClusterStatusResponse_descriptor = - getDescriptor().getMessageTypes().get(89); + getDescriptor().getMessageTypes().get(91); internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetClusterStatusResponse_descriptor, new java.lang.String[] { "ClusterStatus", }); internal_static_hbase_pb_IsMasterRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(90); + getDescriptor().getMessageTypes().get(92); internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsMasterRunningRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsMasterRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(91); + getDescriptor().getMessageTypes().get(93); internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsMasterRunningResponse_descriptor, new java.lang.String[] { "IsMasterRunning", }); internal_static_hbase_pb_ExecProcedureRequest_descriptor = - getDescriptor().getMessageTypes().get(92); + getDescriptor().getMessageTypes().get(94); internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ExecProcedureRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_ExecProcedureResponse_descriptor = - getDescriptor().getMessageTypes().get(93); + getDescriptor().getMessageTypes().get(95); internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ExecProcedureResponse_descriptor, new java.lang.String[] { "ExpectedTimeout", "ReturnData", }); internal_static_hbase_pb_IsProcedureDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(94); + getDescriptor().getMessageTypes().get(96); internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsProcedureDoneRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_IsProcedureDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(95); + getDescriptor().getMessageTypes().get(97); internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsProcedureDoneResponse_descriptor, new java.lang.String[] { "Done", "Snapshot", }); internal_static_hbase_pb_GetProcedureResultRequest_descriptor = - getDescriptor().getMessageTypes().get(96); + getDescriptor().getMessageTypes().get(98); internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetProcedureResultRequest_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_GetProcedureResultResponse_descriptor = - getDescriptor().getMessageTypes().get(97); + getDescriptor().getMessageTypes().get(99); internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetProcedureResultResponse_descriptor, new java.lang.String[] { "State", "StartTime", "LastUpdate", "Result", "Exception", }); internal_static_hbase_pb_AbortProcedureRequest_descriptor = - getDescriptor().getMessageTypes().get(98); + getDescriptor().getMessageTypes().get(100); internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_AbortProcedureRequest_descriptor, new java.lang.String[] { "ProcId", "MayInterruptIfRunning", }); internal_static_hbase_pb_AbortProcedureResponse_descriptor = - getDescriptor().getMessageTypes().get(99); + getDescriptor().getMessageTypes().get(101); internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_AbortProcedureResponse_descriptor, new java.lang.String[] { "IsProcedureAborted", }); internal_static_hbase_pb_ListProceduresRequest_descriptor = - getDescriptor().getMessageTypes().get(100); + getDescriptor().getMessageTypes().get(102); internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ListProceduresRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_ListProceduresResponse_descriptor = - getDescriptor().getMessageTypes().get(101); + getDescriptor().getMessageTypes().get(103); internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ListProceduresResponse_descriptor, new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_SetQuotaRequest_descriptor = - getDescriptor().getMessageTypes().get(102); + getDescriptor().getMessageTypes().get(104); internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetQuotaRequest_descriptor, new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", }); internal_static_hbase_pb_SetQuotaResponse_descriptor = - getDescriptor().getMessageTypes().get(103); + getDescriptor().getMessageTypes().get(105); internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetQuotaResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor = - getDescriptor().getMessageTypes().get(104); + getDescriptor().getMessageTypes().get(106); internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(105); + getDescriptor().getMessageTypes().get(107); internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor, new java.lang.String[] { "Region", }); internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor = - getDescriptor().getMessageTypes().get(106); + getDescriptor().getMessageTypes().get(108); internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor, new java.lang.String[] { "CompactionTimestamp", }); internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor = - getDescriptor().getMessageTypes().get(107); + getDescriptor().getMessageTypes().get(109); internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor = - getDescriptor().getMessageTypes().get(108); + getDescriptor().getMessageTypes().get(110); internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor, diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index 2bf36b48d2c..f67b7c74ef8 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -258,6 +258,13 @@ message StopMasterRequest { message StopMasterResponse { } +message IsInMaintenanceModeRequest { +} + +message IsInMaintenanceModeResponse { + required bool inMaintenanceMode = 1; +} + message BalanceRequest { optional bool force = 1; } @@ -640,6 +647,12 @@ service MasterService { rpc StopMaster(StopMasterRequest) returns(StopMasterResponse); + /** + * Query whether the Master is in maintenance mode. + */ + rpc IsMasterInMaintenanceMode(IsInMaintenanceModeRequest) + returns(IsInMaintenanceModeResponse); + /** * Run the balancer. Will run the balancer and if regions to move, it will * go ahead and do the reassignments. Can NOT run for various reasons. 
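For reviewers, here is a minimal sketch of how a client could exercise the new IsMasterInMaintenanceMode RPC once the stubs above are regenerated. The helper class, the null RpcController, and the way the blocking stub is obtained are illustrative assumptions, not part of this patch.

// Illustrative sketch only: assumes a MasterService.BlockingInterface stub has
// already been obtained from an open cluster connection.
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

public final class MaintenanceModeProbe {
  private MaintenanceModeProbe() {
  }

  /**
   * Asks the active master whether it is in maintenance mode, e.g. so that a
   * repair tool can decide whether another instance is still holding the cluster.
   */
  public static boolean isMasterInMaintenanceMode(
      MasterProtos.MasterService.BlockingInterface master)
      throws com.google.protobuf.ServiceException {
    MasterProtos.IsInMaintenanceModeResponse response =
        master.isMasterInMaintenanceMode(
            null, // a real caller would pass a configured RpcController here
            MasterProtos.IsInMaintenanceModeRequest.newBuilder().build());
    return response.getInMaintenanceMode();
  }
}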
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index c93b307384e..476c796f887 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -109,6 +109,7 @@ public class CatalogJanitor extends ScheduledChore {
     try {
       AssignmentManager am = this.services.getAssignmentManager();
       if (this.enabled.get()
+          && !this.services.isInMaintenanceMode()
           && am != null
           && am.isFailoverCleanupDone()
           && am.getRegionStates().getRegionsInTransition().size() == 0) {
@@ -241,6 +242,11 @@ public class CatalogJanitor extends ScheduledChore {
     int mergeCleaned = 0;
     Map mergedRegions = scanTriple.getSecond();
     for (Map.Entry e : mergedRegions.entrySet()) {
+      if (this.services.isInMaintenanceMode()) {
+        // Stop cleaning if the master is in maintenance mode
+        break;
+      }
+
       PairOfSameType p = MetaTableAccessor.getMergeRegions(e.getValue());
       HRegionInfo regionA = p.getFirst();
       HRegionInfo regionB = p.getSecond();
@@ -266,6 +272,11 @@ public class CatalogJanitor extends ScheduledChore {
     // regions whose parents are still around
     HashSet parentNotCleaned = new HashSet();
     for (Map.Entry e : splitParents.entrySet()) {
+      if (this.services.isInMaintenanceMode()) {
+        // Stop cleaning if the master is in maintenance mode
+        break;
+      }
+
       if (!parentNotCleaned.contains(e.getKey().getEncodedName()) &&
           cleanParent(e.getKey(), e.getValue())) {
         splitCleaned++;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 91963689722..5f5cc3887a2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -159,6 +159,8 @@ import org.apache.hadoop.hbase.util.ZKDataMigrator;
 import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
+import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
 import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
 import org.apache.hadoop.hbase.zookeeper.SplitOrMergeTracker;
@@ -261,6 +263,9 @@ public class HMaster extends HRegionServer implements MasterServices {
   // Tracker for region normalizer state
   private RegionNormalizerTracker regionNormalizerTracker;
 
+  // Tracker for master maintenance mode setting
+  private MasterMaintenanceModeTracker maintenanceModeTracker;
+
   private ClusterSchemaService clusterSchemaService;
 
   // Metrics for the HMaster
@@ -616,6 +621,9 @@ public class HMaster extends HRegionServer implements MasterServices {
     this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager);
     this.drainingServerTracker.start();
 
+    this.maintenanceModeTracker = new MasterMaintenanceModeTracker(zooKeeper);
+    this.maintenanceModeTracker.start();
+
     // Set the cluster as up. If new RSs, they'll be waiting on this before
     // going ahead with their startup.
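(Illustrative note, not part of the patch: any other master-side chore can adopt the same guard that the CatalogJanitor uses above by consulting MasterServices.isInMaintenanceMode(). A minimal sketch follows; the chore class, its name, and its period are hypothetical, and only the isInMaintenanceMode() call comes from this change.)

import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.master.MasterServices;

/** Hypothetical chore that stays idle while the master is in maintenance mode. */
public class MaintenanceAwareChore extends ScheduledChore {
  private final MasterServices services;

  public MaintenanceAwareChore(MasterServices services, int periodMillis) {
    // MasterServices extends Server, which is a Stoppable, so it can act as the chore stopper.
    super("MaintenanceAwareChore", services, periodMillis);
    this.services = services;
  }

  @Override
  protected void chore() {
    if (services.isInMaintenanceMode()) {
      // Same idea as the CatalogJanitor guard: skip this run so HBCK can work undisturbed.
      return;
    }
    // ... periodic work would go here ...
  }
}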
     boolean wasUp = this.clusterStatusTracker.isClusterUp();
@@ -1125,6 +1133,12 @@ public class HMaster extends HRegionServer implements MasterServices {
       LOG.debug("Master has not been initialized, don't run balancer.");
       return false;
     }
+
+    if (isInMaintenanceMode()) {
+      LOG.info("Master is in maintenance mode, don't run balancer.");
+      return false;
+    }
+
     // Do this call outside of synchronized block.
     int maximumBalanceTime = getBalancerCutoffTime();
     synchronized (this.balancer) {
@@ -1228,6 +1242,11 @@ public class HMaster extends HRegionServer implements MasterServices {
       return false;
     }
 
+    if (isInMaintenanceMode()) {
+      LOG.info("Master is in maintenance mode, don't run region normalizer.");
+      return false;
+    }
+
     if (!this.regionNormalizerTracker.isNormalizerOn()) {
       LOG.debug("Region normalization is disabled, don't run region normalizer.");
       return false;
@@ -1241,6 +1260,11 @@ public class HMaster extends HRegionServer implements MasterServices {
     Collections.shuffle(allEnabledTables);
 
     for (TableName table : allEnabledTables) {
+      if (isInMaintenanceMode()) {
+        LOG.debug("Master is in maintenance mode, stop running region normalizer.");
+        return false;
+      }
+
       HTableDescriptor tblDesc = getTableDescriptors().get(table);
       if (table.isSystemTable() || (tblDesc != null &&
           !tblDesc.isNormalizationEnabled())) {
@@ -2264,6 +2288,16 @@ public class HMaster extends HRegionServer implements MasterServices {
     return initialized.isReady();
   }
 
+  /**
+   * Report whether this master is in maintenance mode.
+   *
+   * @return true if master is in maintenance mode
+   */
+  @Override
+  public boolean isInMaintenanceMode() {
+    return maintenanceModeTracker.isInMaintenanceMode();
+  }
+
   @VisibleForTesting
   public void setInitialized(boolean isInitialized) {
     procedureExecutor.getEnvironment().setEventReady(initialized, isInitialized);
@@ -2735,7 +2769,9 @@ public class HMaster extends HRegionServer implements MasterServices {
    * @return The state of the load balancer, or false if the load balancer isn't defined.
    */
   public boolean isBalancerOn() {
-    if (null == loadBalancerTracker) return false;
+    if (null == loadBalancerTracker || isInMaintenanceMode()) {
+      return false;
+    }
     return loadBalancerTracker.isBalancerOn();
   }
 
@@ -2744,10 +2780,10 @@ public class HMaster extends HRegionServer implements MasterServices {
    * false is returned.
    */
   public boolean isNormalizerOn() {
-    return null == regionNormalizerTracker? false: regionNormalizerTracker.isNormalizerOn();
+    return (null == regionNormalizerTracker || isInMaintenanceMode()) ?
+        false: regionNormalizerTracker.isNormalizerOn();
   }
 
-
   /**
    * Queries the state of the {@link SplitOrMergeTracker}. If it is not initialized,
    * false is returned. If switchType is illegal, false will return.
@@ -2755,7 +2791,7 @@ public class HMaster extends HRegionServer implements MasterServices {
    * @return The state of the switch
    */
   public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) {
-    if (null == splitOrMergeTracker) {
+    if (null == splitOrMergeTracker || isInMaintenanceMode()) {
       return false;
     }
     return splitOrMergeTracker.isSplitOrMergeEnabled(switchType);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 90af7c7935d..8974945ea96 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1245,6 +1245,15 @@ public class MasterRpcServices extends RSRpcServices
     return StopMasterResponse.newBuilder().build();
   }
 
+  @Override
+  public IsInMaintenanceModeResponse isMasterInMaintenanceMode(
+      final RpcController controller,
+      final IsInMaintenanceModeRequest request) throws ServiceException {
+    IsInMaintenanceModeResponse.Builder response = IsInMaintenanceModeResponse.newBuilder();
+    response.setInMaintenanceMode(master.isInMaintenanceMode());
+    return response.build();
+  }
+
   @Override
   public UnassignRegionResponse unassignRegion(RpcController controller,
       UnassignRegionRequest req) throws ServiceException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 9b91572afa8..cfb20230908 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -309,6 +309,11 @@ public interface MasterServices extends Server {
    */
   boolean isInitialized();
 
+  /**
+   * @return true if master is in maintenance mode
+   */
+  boolean isInMaintenanceMode();
+
   /**
    * Abort a procedure.
    * @param procId ID of the procedure
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index c05973b1d4f..d483c71c4c0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -104,7 +104,6 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
@@ -209,6 +208,9 @@ public class HBaseFsck extends Configured implements Closeable {
   // AlreadyBeingCreatedException which is implies timeout on this operations up to
   // HdfsConstants.LEASE_SOFTLIMIT_PERIOD (60 seconds).
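(Illustrative note, not part of the patch: the RPC added in MasterRpcServices above is surfaced to clients through the Admin API elsewhere in this change. A minimal client-side check, assuming a reachable cluster, could look like the following sketch.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MaintenanceModeCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // True while some tool (for example HBCK) holds an ephemeral child under the
      // master-maintenance znode; balancer, normalizer and catalog janitor stay quiet then.
      if (admin.isMasterInMaintenanceMode()) {
        System.out.println("Master is in maintenance mode; deferring maintenance-sensitive work.");
      }
    }
  }
}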
   private static final int DEFAULT_WAIT_FOR_LOCK_TIMEOUT = 80; // seconds
+  private static final int DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS = 5;
+  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL = 200; // milliseconds
+  private static final int DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME = 5000; // milliseconds
 
   /**********************
    * Internal resources
@@ -236,8 +238,6 @@ public class HBaseFsck extends Configured implements Closeable {
   private static boolean details = false; // do we display the full report
   private long timelag = DEFAULT_TIME_LAG; // tables whose modtime is older
   private static boolean forceExclusive = false; // only this hbck can modify HBase
-  private static boolean disableBalancer = false; // disable load balancer to keep regions stable
-  private static boolean disableSplitAndMerge = false; // disable split and merge
   private boolean fixAssignments = false; // fix assignment errors?
   private boolean fixMeta = false; // fix meta errors?
   private boolean checkHdfs = true; // load and check fs consistency?
@@ -306,10 +306,13 @@ public class HBaseFsck extends Configured implements Closeable {
   private Map tableStates = new HashMap();
   private final RetryCounterFactory lockFileRetryCounterFactory;
+  private final RetryCounterFactory createZNodeRetryCounterFactory;
   private Map> skippedRegions = new HashMap>();
 
-  ZooKeeperWatcher zkw = null;
+  private ZooKeeperWatcher zkw = null;
+  private String hbckEphemeralNodePath = null;
+  private boolean hbckZodeCreated = false;
 
   /**
    * Constructor
@@ -349,6 +352,14 @@ public class HBaseFsck extends Configured implements Closeable {
         "hbase.hbck.lockfile.attempt.sleep.interval", DEFAULT_LOCK_FILE_ATTEMPT_SLEEP_INTERVAL),
       getConf().getInt(
         "hbase.hbck.lockfile.attempt.maxsleeptime", DEFAULT_LOCK_FILE_ATTEMPT_MAX_SLEEP_TIME));
+    createZNodeRetryCounterFactory = new RetryCounterFactory(
+      getConf().getInt("hbase.hbck.createznode.attempts", DEFAULT_MAX_CREATE_ZNODE_ATTEMPTS),
+      getConf().getInt(
+        "hbase.hbck.createznode.attempt.sleep.interval",
+        DEFAULT_CREATE_ZNODE_ATTEMPT_SLEEP_INTERVAL),
+      getConf().getInt(
+        "hbase.hbck.createznode.attempt.maxsleeptime",
+        DEFAULT_CREATE_ZNODE_ATTEMPT_MAX_SLEEP_TIME));
     zkw = createZooKeeperWatcher();
   }
 
@@ -498,6 +509,7 @@ public class HBaseFsck extends Configured implements Closeable {
       @Override
      public void run() {
        IOUtils.closeQuietly(HBaseFsck.this);
+       cleanupHbckZnode();
        unlockHbck();
      }
    });
@@ -676,49 +688,78 @@ public class HBaseFsck extends Configured implements Closeable {
     return errors.getErrorList().size();
   }
 
+  /**
+   * This method maintains an ephemeral znode. If creating the znode fails, it returns false or
+   * throws an exception.
+   *
+   * @return true if creating znode succeeds; false otherwise
+   * @throws IOException if IO failure occurs
+   */
+  private boolean setMasterInMaintenanceMode() throws IOException {
+    RetryCounter retryCounter = createZNodeRetryCounterFactory.create();
+    hbckEphemeralNodePath = ZKUtil.joinZNode(
+      ZooKeeperWatcher.masterMaintZNode,
+      "hbck-" + Long.toString(EnvironmentEdgeManager.currentTime()));
+    do {
+      try {
+        hbckZodeCreated = ZKUtil.createEphemeralNodeAndWatch(zkw, hbckEphemeralNodePath, null);
+        if (hbckZodeCreated) {
+          break;
+        }
+      } catch (KeeperException e) {
+        if (retryCounter.getAttemptTimes() >= retryCounter.getMaxAttempts()) {
+          throw new IOException("Can't create znode " + hbckEphemeralNodePath, e);
+        }
+        // fall through and retry
+      }
+
+      LOG.warn("Failed to create znode " + hbckEphemeralNodePath + ", try=" +
+          (retryCounter.getAttemptTimes() + 1) + " of " + retryCounter.getMaxAttempts());
+
+      try {
+        retryCounter.sleepUntilNextRetry();
+      } catch (InterruptedException ie) {
+        throw (InterruptedIOException) new InterruptedIOException(
+          "Can't create znode " + hbckEphemeralNodePath).initCause(ie);
+      }
+    } while (retryCounter.shouldRetry());
+    return hbckZodeCreated;
+  }
+
+  private void cleanupHbckZnode() {
+    try {
+      if (zkw != null && hbckZodeCreated) {
+        ZKUtil.deleteNode(zkw, hbckEphemeralNodePath);
+        hbckZodeCreated = false;
+      }
+    } catch (KeeperException e) {
+      // Ignore
+      if (!e.code().equals(KeeperException.Code.NONODE)) {
+        LOG.warn("Delete HBCK znode " + hbckEphemeralNodePath + " failed ", e);
+      }
+    }
+  }
+
   /**
    * Contacts the master and prints out cluster-wide information
    * @return 0 on success, non-zero on failure
    */
-  public int onlineHbck() throws IOException, KeeperException, InterruptedException, ServiceException {
+  public int onlineHbck()
+      throws IOException, KeeperException, InterruptedException, ServiceException {
     // print hbase server version
     errors.print("Version: " + status.getHBaseVersion());
     offlineHdfsIntegrityRepair();
 
-    boolean oldBalancer = false;
-    if (shouldDisableBalancer()) {
-      oldBalancer = admin.setBalancerRunning(false, true);
-    }
-    boolean[] oldSplitAndMerge = null;
-    if (shouldDisableSplitAndMerge()) {
-      oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false,
-        MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
+    // If Master runs maintenance tasks (such as balancer, catalog janitor, etc) during online
+    // hbck, it is likely that hbck would be misled and report transient errors. Therefore, it
+    // is better to set Master into maintenance mode during online hbck.
+    //
+    if (!setMasterInMaintenanceMode()) {
+      LOG.warn("HBCK is running while master is not in maintenance mode, you might see transient " +
+          "errors. Please run HBCK multiple times to reduce the chance of transient errors.");
     }
 
-    try {
-      onlineConsistencyRepair();
-    }
-    finally {
-      // Only restore the balancer if it was true when we started repairing and
-      // we actually disabled it. Otherwise, we might clobber another run of
-      // hbck that has just restored it.
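(Illustrative note, not part of the patch: the znode-creation retry behaviour introduced above is driven by three configuration keys with defaults of 5 attempts, a 200 ms initial sleep and a 5000 ms cap. A sketch of overriding them before invoking HBCK programmatically follows; the class name and values are made up, only the keys and defaults come from the hunk above.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HbckZnodeRetryTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Keys and defaults come from the HBaseFsck change above; the values here are arbitrary.
    conf.setInt("hbase.hbck.createznode.attempts", 10);                   // default 5
    conf.setInt("hbase.hbck.createznode.attempt.sleep.interval", 500);    // default 200 ms
    conf.setInt("hbase.hbck.createznode.attempt.maxsleeptime", 10000);    // default 5000 ms
    // This Configuration would then be handed to HBCK, which reads the keys via getConf().
  }
}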
-      if (shouldDisableBalancer() && oldBalancer) {
-        admin.setBalancerRunning(oldBalancer, false);
-      }
-
-      if (shouldDisableSplitAndMerge()) {
-        if (oldSplitAndMerge != null) {
-          if (oldSplitAndMerge[0] && oldSplitAndMerge[1]) {
-            admin.setSplitOrMergeEnabled(true, false,
-              MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
-          } else if (oldSplitAndMerge[0]) {
-            admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.SPLIT);
-          } else if (oldSplitAndMerge[1]) {
-            admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.MERGE);
-          }
-        }
-      }
-    }
+    onlineConsistencyRepair();
 
     if (checkRegionBoundaries) {
       checkRegionBoundaries();
@@ -730,6 +771,9 @@ public class HBaseFsck extends Configured implements Closeable {
 
     checkAndFixReplication();
 
+    // Remove the hbck znode
+    cleanupHbckZnode();
+
     // Remove the hbck lock
     unlockHbck();
 
@@ -750,6 +794,7 @@ public class HBaseFsck extends Configured implements Closeable {
   @Override
   public void close() throws IOException {
     try {
+      cleanupHbckZnode();
       unlockHbck();
     } catch (Exception io) {
       LOG.warn(io);
@@ -4221,38 +4266,6 @@ public class HBaseFsck extends Configured implements Closeable {
     return fixAny || forceExclusive;
   }
 
-  /**
-   * Disable the load balancer.
-   */
-  public static void setDisableBalancer() {
-    disableBalancer = true;
-  }
-
-  /**
-   * Disable the split and merge
-   */
-  public static void setDisableSplitAndMerge() {
-    disableSplitAndMerge = true;
-  }
-
-  /**
-   * The balancer should be disabled if we are modifying HBase.
-   * It can be disabled if you want to prevent region movement from causing
-   * false positives.
-   */
-  public boolean shouldDisableBalancer() {
-    return fixAny || disableBalancer;
-  }
-
-  /**
-   * The split and merge should be disabled if we are modifying HBase.
-   * It can be disabled if you want to prevent region movement from causing
-   * false positives.
-   */
-  public boolean shouldDisableSplitAndMerge() {
-    return fixAny || disableSplitAndMerge;
-  }
-
   /**
    * Set summary mode.
    * Print only summary of the tables and status (OK or INCONSISTENT)
@@ -4514,7 +4527,6 @@ public class HBaseFsck extends Configured implements Closeable {
     out.println(" -sidelineDir HDFS path to backup existing meta.");
     out.println(" -boundaries Verify that regions boundaries are the same between META and store files.");
     out.println(" -exclusive Abort if another hbck is exclusive or fixing.");
-    out.println(" -disableBalancer Disable the load balancer.");
 
     out.println("");
     out.println(" Metadata Repair options: (expert features, use with caution!)");
@@ -4610,10 +4622,6 @@ public class HBaseFsck extends Configured implements Closeable {
         setDisplayFullReport();
       } else if (cmd.equals("-exclusive")) {
         setForceExclusive();
-      } else if (cmd.equals("-disableBalancer")) {
-        setDisableBalancer();
-      } else if (cmd.equals("-disableSplitAndMerge")) {
-        setDisableSplitAndMerge();
       } else if (cmd.equals("-timelag")) {
         if (i == args.length - 1) {
           errors.reportError(ERROR_CODE.WRONG_USAGE, "HBaseFsck: -timelag needs a value.");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java
new file mode 100644
index 00000000000..fc0e05fb7b3
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterMaintenanceModeTracker.java
@@ -0,0 +1,81 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.zookeeper;
+
+import java.util.List;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.zookeeper.KeeperException;
+
+/**
+ * Tracks the master Maintenance Mode via ZK.
+ */
+@InterfaceAudience.Private
+public class MasterMaintenanceModeTracker extends ZooKeeperListener {
+  private boolean hasChildren;
+
+  public MasterMaintenanceModeTracker(ZooKeeperWatcher watcher) {
+    super(watcher);
+    hasChildren = false;
+  }
+
+  public boolean isInMaintenanceMode() {
+    return hasChildren;
+  }
+
+  private void update(String path) {
+    if (path.startsWith(ZooKeeperWatcher.masterMaintZNode)) {
+      update();
+    }
+  }
+
+  private void update() {
+    try {
+      List children =
+          ZKUtil.listChildrenAndWatchForNewChildren(watcher, ZooKeeperWatcher.masterMaintZNode);
+      hasChildren = (children != null && children.size() > 0);
+    } catch (KeeperException e) {
+      // Ignore the ZK keeper exception
+      hasChildren = false;
+    }
+  }
+
+  /**
+   * Starts the tracking of whether master is in Maintenance Mode.
+   */
+  public void start() {
+    watcher.registerListener(this);
+    update();
+  }
+
+  @Override
+  public void nodeCreated(String path) {
+    update(path);
+  }
+
+  @Override
+  public void nodeDeleted(String path) {
+    update(path);
+  }
+
+  @Override
+  public void nodeChildrenChanged(String path) {
+    update(path);
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
index 2dad9878a6e..83ab3502379 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
@@ -146,6 +146,4 @@ public class SplitOrMergeTracker {
       return builder.build();
     }
   }
-
-
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index c7a42d9d2d0..0a86ecb4981 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -306,6 +306,11 @@ public class MockNoopMasterServices implements MasterServices, Server {
     return false;
   }
 
+  @Override
+  public boolean isInMaintenanceMode() {
+    return false;
+  }
+
   @Override
   public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
     return 0;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
index 7f0f6dbcfa4..e03a0d5c496 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
@@ -78,7 +77,6 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.Set;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
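(Illustrative note, not part of the patch: maintenance mode is simply the presence of at least one ephemeral child under the master-maintenance znode, ZooKeeperWatcher.masterMaintZNode. HBCK creates one such child on startup and deletes it on exit, and MasterMaintenanceModeTracker flips its flag whenever the children change. Another admin tool could enter and leave maintenance mode the same way; the class and node names in this sketch are hypothetical.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class MaintenanceWindow {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Abortable doNothing = new Abortable() {
      @Override public void abort(String why, Throwable e) { }
      @Override public boolean isAborted() { return false; }
    };
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "maintenance-window", doNothing);
    String node = ZKUtil.joinZNode(ZooKeeperWatcher.masterMaintZNode,
        "tool-" + EnvironmentEdgeManager.currentTime());
    boolean created = false;
    try {
      // Any ephemeral child under masterMaintZNode puts the master into maintenance mode.
      created = ZKUtil.createEphemeralNodeAndWatch(zkw, node, null);
      if (created) {
        // ... work that must not race with the balancer, normalizer or catalog janitor ...
      }
    } finally {
      if (created) {
        ZKUtil.deleteNode(zkw, node); // leave maintenance mode; session expiry also cleans it up
      }
      zkw.close();
    }
  }
}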