diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 34e0a8971f6..df79dcfe410 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1729,15 +1729,10 @@ public interface Admin extends Abortable, Closeable {
    *
    * @param enabled enabled or not
    * @param synchronous If true, it waits until current split() call, if outstanding, to return.
-   * @param skipLock if false, we will do lock before change switch.
-   *                 with the lock, other requests to change the switch will be rejected!
-   *                 And when you set it to be false,
-   *                 you should call {@link #releaseSplitOrMergeLockAndRollback()} by yourself
    * @param switchTypes switchType list {@link MasterSwitchType}
    * @return Previous switch value array
    */
   boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean synchronous,
-    final boolean skipLock,
     final MasterSwitchType... switchTypes) throws IOException;
 
   /**
@@ -1746,12 +1741,4 @@ public interface Admin extends Abortable, Closeable {
    * @return true if the switch is enabled, false otherwise.
    */
   boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws IOException;
-
-  /**
-   * You should call this method after you call
-   * {@link #setSplitOrMergeEnabled(boolean, boolean, boolean, MasterSwitchType...)}
-   * with skipLock be false, this method will release the lock created by above method
-   * and rollback the switch state to be original state before you change switch
-   * */
-  void releaseSplitOrMergeLockAndRollback() throws IOException;
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 1fe29c8386d..9b913c86fea 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -1663,13 +1663,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
         return stub.isSplitOrMergeEnabled(controller, request);
       }
 
-      @Override
-      public MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse
-          releaseSplitOrMergeLockAndRollback(RpcController controller,
-          MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request) throws ServiceException {
-        return stub.releaseSplitOrMergeLockAndRollback(controller, request);
-      }
-
       @Override
       public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController controller,
           IsNormalizerEnabledRequest request) throws ServiceException {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index ac98bdb618e..074fe7f1bf1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -3704,13 +3704,13 @@ public class HBaseAdmin implements Admin {
 
   @Override
   public boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean synchronous,
-      final boolean skipLock, final MasterSwitchType... switchTypes) throws IOException {
+      final MasterSwitchType... switchTypes)
+      throws IOException {
     return executeCallable(new MasterCallable<boolean[]>(getConnection()) {
       @Override
       public boolean[] call(int callTimeout) throws ServiceException {
         MasterProtos.SetSplitOrMergeEnabledResponse response = master.setSplitOrMergeEnabled(null,
-          RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous,
-            skipLock, switchTypes));
+          RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, switchTypes));
         boolean[] result = new boolean[switchTypes.length];
         int i = 0;
         for (Boolean prevValue : response.getPrevValueList()) {
@@ -3732,18 +3732,6 @@
     });
   }
 
-  @Override
-  public void releaseSplitOrMergeLockAndRollback() throws IOException {
-    executeCallable(new MasterCallable<Void>(getConnection()) {
-      @Override
-      public Void call(int callTimeout) throws ServiceException {
-        master.releaseSplitOrMergeLockAndRollback(null,
-          RequestConverter.buildReleaseSplitOrMergeLockAndRollbackRequest());
-        return null;
-      }
-    });
-  }
-
   private HRegionInfo getMobRegionInfo(TableName tableName) {
     return new HRegionInfo(tableName, Bytes.toBytes(".mob"), HConstants.EMPTY_END_ROW,
       false, 0);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
index 5fe2016868d..b75d2b8cd03 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
@@ -103,7 +103,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableReques
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
@@ -1739,13 +1738,6 @@ public final class RequestConverter {
     return builder.build();
   }
 
-  public static ReleaseSplitOrMergeLockAndRollbackRequest
-      buildReleaseSplitOrMergeLockAndRollbackRequest() {
-    ReleaseSplitOrMergeLockAndRollbackRequest.Builder builder =
-        ReleaseSplitOrMergeLockAndRollbackRequest.newBuilder();
-    return builder.build();
-  }
-
   /**
    * Creates a protocol buffer SetSplitOrMergeEnabledRequest
   *
@@ -1756,11 +1748,10 @@
    * @return a SetSplitOrMergeEnabledRequest
    */
  public static SetSplitOrMergeEnabledRequest buildSetSplitOrMergeEnabledRequest(boolean enabled,
-      boolean synchronous, boolean skipLock, MasterSwitchType... switchTypes) {
+      boolean synchronous, MasterSwitchType... switchTypes) {
     SetSplitOrMergeEnabledRequest.Builder builder = SetSplitOrMergeEnabledRequest.newBuilder();
     builder.setEnabled(enabled);
     builder.setSynchronous(synchronous);
-    builder.setSkipLock(skipLock);
     for (MasterSwitchType switchType : switchTypes) {
       builder.addSwitchTypes(convert(switchType));
     }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index ff3d1c70a5e..7cbfc98108d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -117,8 +117,6 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
   private String regionNormalizerZNode;
   // znode containing the state of all switches, currently there are split and merge child node.
   private String switchZNode;
-  // znode containing the lock for the switches
-  private String switchLockZNode;
   // znode containing the lock for the tables
   public String tableLockZNode;
   // znode containing the state of recovering regions
@@ -438,7 +436,6 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
     regionNormalizerZNode = ZKUtil.joinZNode(baseZNode,
       conf.get("zookeeper.znode.regionNormalizer", "normalizer"));
     switchZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch"));
-    switchLockZNode = ZKUtil.joinZNode(switchZNode, "locks");
     tableLockZNode = ZKUtil.joinZNode(baseZNode,
       conf.get("zookeeper.znode.tableLock", "table-lock"));
     recoveringRegionsZNode = ZKUtil.joinZNode(baseZNode,
@@ -782,11 +779,4 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
   public String getSwitchZNode() {
     return switchZNode;
   }
-
-  /**
-   * @return ZK node for switchLock node.
- * */ - public String getSwitchLockZNode() { - return switchLockZNode; - } } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index 4a92e146e3c..6daf889e7e0 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -29158,16 +29158,6 @@ public final class MasterProtos { * repeated .hbase.pb.MasterSwitchType switch_types = 3; */ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterSwitchType getSwitchTypes(int index); - - // optional bool skip_lock = 4; - /** - * optional bool skip_lock = 4; - */ - boolean hasSkipLock(); - /** - * optional bool skip_lock = 4; - */ - boolean getSkipLock(); } /** * Protobuf type {@code hbase.pb.SetSplitOrMergeEnabledRequest} @@ -29263,11 +29253,6 @@ public final class MasterProtos { input.popLimit(oldLimit); break; } - case 32: { - bitField0_ |= 0x00000004; - skipLock_ = input.readBool(); - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -29365,27 +29350,10 @@ public final class MasterProtos { return switchTypes_.get(index); } - // optional bool skip_lock = 4; - public static final int SKIP_LOCK_FIELD_NUMBER = 4; - private boolean skipLock_; - /** - * optional bool skip_lock = 4; - */ - public boolean hasSkipLock() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * optional bool skip_lock = 4; - */ - public boolean getSkipLock() { - return skipLock_; - } - private void initFields() { enabled_ = false; synchronous_ = false; switchTypes_ = java.util.Collections.emptyList(); - skipLock_ = false; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -29412,9 +29380,6 @@ public final class MasterProtos { for (int i = 0; i < switchTypes_.size(); i++) { output.writeEnum(3, switchTypes_.get(i).getNumber()); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBool(4, skipLock_); - } getUnknownFields().writeTo(output); } @@ -29441,10 +29406,6 @@ public final class MasterProtos { size += dataSize; size += 1 * switchTypes_.size(); } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(4, skipLock_); - } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -29480,11 +29441,6 @@ public final class MasterProtos { } result = result && getSwitchTypesList() .equals(other.getSwitchTypesList()); - result = result && (hasSkipLock() == other.hasSkipLock()); - if (hasSkipLock()) { - result = result && (getSkipLock() - == other.getSkipLock()); - } result = result && getUnknownFields().equals(other.getUnknownFields()); return result; @@ -29510,10 +29466,6 @@ public final class MasterProtos { hash = (37 * hash) + SWITCH_TYPES_FIELD_NUMBER; hash = (53 * hash) + hashEnumList(getSwitchTypesList()); } - if (hasSkipLock()) { - hash = (37 * hash) + SKIP_LOCK_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getSkipLock()); - } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -29629,8 +29581,6 @@ public final class MasterProtos { bitField0_ = (bitField0_ & ~0x00000002); switchTypes_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); - skipLock_ = false; - bitField0_ = (bitField0_ & ~0x00000008); return this; } @@ -29672,10 
+29622,6 @@ public final class MasterProtos { bitField0_ = (bitField0_ & ~0x00000004); } result.switchTypes_ = switchTypes_; - if (((from_bitField0_ & 0x00000008) == 0x00000008)) { - to_bitField0_ |= 0x00000004; - } - result.skipLock_ = skipLock_; result.bitField0_ = to_bitField0_; onBuilt(); return result; @@ -29708,9 +29654,6 @@ public final class MasterProtos { } onChanged(); } - if (other.hasSkipLock()) { - setSkipLock(other.getSkipLock()); - } this.mergeUnknownFields(other.getUnknownFields()); return this; } @@ -29880,39 +29823,6 @@ public final class MasterProtos { return this; } - // optional bool skip_lock = 4; - private boolean skipLock_ ; - /** - * optional bool skip_lock = 4; - */ - public boolean hasSkipLock() { - return ((bitField0_ & 0x00000008) == 0x00000008); - } - /** - * optional bool skip_lock = 4; - */ - public boolean getSkipLock() { - return skipLock_; - } - /** - * optional bool skip_lock = 4; - */ - public Builder setSkipLock(boolean value) { - bitField0_ |= 0x00000008; - skipLock_ = value; - onChanged(); - return this; - } - /** - * optional bool skip_lock = 4; - */ - public Builder clearSkipLock() { - bitField0_ = (bitField0_ & ~0x00000008); - skipLock_ = false; - onChanged(); - return this; - } - // @@protoc_insertion_point(builder_scope:hbase.pb.SetSplitOrMergeEnabledRequest) } @@ -31318,682 +31228,6 @@ public final class MasterProtos { // @@protoc_insertion_point(class_scope:hbase.pb.IsSplitOrMergeEnabledResponse) } - public interface ReleaseSplitOrMergeLockAndRollbackRequestOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - /** - * Protobuf type {@code hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest} - */ - public static final class ReleaseSplitOrMergeLockAndRollbackRequest extends - com.google.protobuf.GeneratedMessage - implements ReleaseSplitOrMergeLockAndRollbackRequestOrBuilder { - // Use ReleaseSplitOrMergeLockAndRollbackRequest.newBuilder() to construct. 
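
For reference, a minimal sketch of how client code drives the switch API once this patch is applied. Only setSplitOrMergeEnabled and isSplitOrMergeEnabled come from the Admin interface hunks above; the connection boilerplate and the import location of MasterSwitchType are assumptions about the surrounding client setup.

    // Sketch only: exercises the simplified Admin API from the hunks above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.MasterSwitchType; // package varies by branch

    public class SwitchSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flip both switches off; there is no skipLock argument any more, and
          // no releaseSplitOrMergeLockAndRollback() call to remember afterwards.
          boolean[] prev = admin.setSplitOrMergeEnabled(false, true,
              MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
          // Previous values come back in the order the switch types were passed.
          System.out.println("split was enabled: " + prev[0]);
          System.out.println("merge is enabled now: "
              + admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE));
        }
      }
    }
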
- private ReleaseSplitOrMergeLockAndRollbackRequest(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private ReleaseSplitOrMergeLockAndRollbackRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final ReleaseSplitOrMergeLockAndRollbackRequest defaultInstance; - public static ReleaseSplitOrMergeLockAndRollbackRequest getDefaultInstance() { - return defaultInstance; - } - - public ReleaseSplitOrMergeLockAndRollbackRequest getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private ReleaseSplitOrMergeLockAndRollbackRequest( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ReleaseSplitOrMergeLockAndRollbackRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ReleaseSplitOrMergeLockAndRollbackRequest(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int 
memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequestOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance(); - } - - public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest) - } - - static { - defaultInstance = new ReleaseSplitOrMergeLockAndRollbackRequest(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest) - } - - public interface ReleaseSplitOrMergeLockAndRollbackResponseOrBuilder - extends com.google.protobuf.MessageOrBuilder { - } - /** - * Protobuf type {@code hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse} - */ - public static final class ReleaseSplitOrMergeLockAndRollbackResponse extends - com.google.protobuf.GeneratedMessage - implements ReleaseSplitOrMergeLockAndRollbackResponseOrBuilder { - // Use ReleaseSplitOrMergeLockAndRollbackResponse.newBuilder() to construct. 
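
The generated message above loses its optional skip_lock field (tag 4, wire byte 0x20). A small sketch of what that means on the wire, using only builder methods that survive in the generated code; the SPLIT enum value on the proto enum is an assumption:

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;

    public class WireSketch {
      public static void main(String[] args) throws Exception {
        // SetSplitOrMergeEnabledRequest now carries only enabled (1),
        // synchronous (2) and switch_types (3).
        SetSplitOrMergeEnabledRequest req = SetSplitOrMergeEnabledRequest.newBuilder()
            .setEnabled(true)
            .setSynchronous(false)
            .addSwitchTypes(MasterProtos.MasterSwitchType.SPLIT) // enum value assumed
            .build();
        // Append field 4 (tag 0x20 = 4 << 3 | varint) set to true, the way a
        // pre-patch client would have encoded skip_lock = true.
        byte[] oldWire = req.toByteString()
            .concat(ByteString.copyFrom(new byte[] { 0x20, 0x01 }))
            .toByteArray();
        SetSplitOrMergeEnabledRequest parsed = SetSplitOrMergeEnabledRequest.parseFrom(oldWire);
        // The deleted "case 32" in the rewritten parser means the bool is no
        // longer a known field; it is preserved in the unknown-field set instead.
        System.out.println(parsed.getUnknownFields().hasField(4)); // true
      }
    }
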
- private ReleaseSplitOrMergeLockAndRollbackResponse(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private ReleaseSplitOrMergeLockAndRollbackResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final ReleaseSplitOrMergeLockAndRollbackResponse defaultInstance; - public static ReleaseSplitOrMergeLockAndRollbackResponse getDefaultInstance() { - return defaultInstance; - } - - public ReleaseSplitOrMergeLockAndRollbackResponse getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private ReleaseSplitOrMergeLockAndRollbackResponse( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public ReleaseSplitOrMergeLockAndRollbackResponse parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ReleaseSplitOrMergeLockAndRollbackResponse(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private void initFields() { - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - getUnknownFields().writeTo(output); - } - - private int 
memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse) obj; - - boolean result = true; - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse} - */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponseOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance(); - } - - public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse build() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse(this); - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance()) return this; - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse) - } - - static { - defaultInstance = new ReleaseSplitOrMergeLockAndRollbackResponse(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse) - } - public interface NormalizeRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { } @@ -60878,19 +60112,6 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request, com.google.protobuf.RpcCallback done); - /** - * rpc ReleaseSplitOrMergeLockAndRollback(.hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest) returns (.hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse); - * - *
<pre>
-       **
-       * Release lock and rollback state.
-       * </pre>
- */ - public abstract void releaseSplitOrMergeLockAndRollback( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request, - com.google.protobuf.RpcCallback done); - /** * rpc Normalize(.hbase.pb.NormalizeRequest) returns (.hbase.pb.NormalizeResponse); * @@ -61475,14 +60696,6 @@ public final class MasterProtos { impl.isSplitOrMergeEnabled(controller, request, done); } - @java.lang.Override - public void releaseSplitOrMergeLockAndRollback( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request, - com.google.protobuf.RpcCallback done) { - impl.releaseSplitOrMergeLockAndRollback(controller, request, done); - } - @java.lang.Override public void normalize( com.google.protobuf.RpcController controller, @@ -61798,66 +61011,64 @@ public final class MasterProtos { case 25: return impl.isSplitOrMergeEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest)request); case 26: - return impl.releaseSplitOrMergeLockAndRollback(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)request); - case 27: return impl.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request); - case 28: + case 27: return impl.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request); - case 29: + case 28: return impl.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request); - case 30: + case 29: return impl.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request); - case 31: + case 30: return impl.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request); - case 32: + case 31: return impl.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request); - case 33: + case 32: return impl.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request); - case 34: + case 33: return impl.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request); - case 35: + case 34: return impl.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request); - case 36: + case 35: return impl.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request); - case 37: + case 36: return impl.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request); - case 38: + case 37: return impl.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request); - case 39: + case 38: return impl.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request); - case 40: + case 39: return impl.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request); - case 41: + case 40: return impl.isProcedureDone(controller, 
(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request); - case 42: + case 41: return impl.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request); - case 43: + case 42: return impl.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request); - case 44: + case 43: return impl.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request); - case 45: + case 44: return impl.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request); - case 46: + case 45: return impl.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request); - case 47: + case 46: return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request); - case 48: + case 47: return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request); - case 49: + case 48: return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request); - case 50: + case 49: return impl.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request); - case 51: + case 50: return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request); - case 52: + case 51: return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request); - case 53: + case 52: return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request); - case 54: + case 53: return impl.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request); - case 55: + case 54: return impl.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request); - case 56: + case 55: return impl.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request); default: throw new java.lang.AssertionError("Can't get here."); @@ -61926,66 +61137,64 @@ public final class MasterProtos { case 25: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance(); - case 27: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance(); - case 28: + case 27: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance(); - case 29: + case 28: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance(); - case 30: + case 29: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance(); - case 31: + case 30: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance(); - 
case 32: + case 31: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance(); - case 33: + case 32: return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance(); - case 34: + case 33: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance(); - case 35: + case 34: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance(); - case 36: + case 35: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance(); - case 37: + case 36: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance(); - case 38: + case 37: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance(); + case 38: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 39: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); case 40: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance(); - case 41: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance(); - case 42: + case 41: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance(); - case 43: + case 42: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance(); - case 44: + case 43: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance(); - case 45: + case 44: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance(); - case 46: + case 45: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance(); - case 47: + case 46: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); - case 48: + case 47: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); - case 49: + case 48: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); - case 50: + case 49: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance(); - case 51: + case 50: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance(); - case 52: + case 51: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance(); - case 53: + case 52: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance(); - case 54: + case 53: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance(); - case 55: + case 54: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance(); - case 56: + case 55: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -62054,66 +61263,64 @@ public final class MasterProtos { case 25: return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance(); case 26: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance(); - case 27: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(); - case 28: + case 27: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(); - case 29: + case 28: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(); - case 30: + case 29: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(); - case 31: + case 30: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(); - case 32: + case 31: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(); - case 33: + case 32: return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(); - case 34: + case 33: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(); - case 35: + case 34: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(); - case 36: + case 35: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(); - case 37: + case 36: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(); - case 38: + case 37: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(); + case 38: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 39: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); case 40: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(); - case 41: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(); - case 42: + case 41: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(); - case 43: + case 42: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(); - case 44: + case 43: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); - case 45: + case 44: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); - case 46: + case 45: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); - case 47: + case 46: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); - case 48: + case 47: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); - case 49: + case 48: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); - case 50: + case 49: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + case 50: + return 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 51: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 52: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); - case 53: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); - case 54: + case 53: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); - case 55: + case 54: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); - case 56: + case 55: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -62452,19 +61659,6 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request, com.google.protobuf.RpcCallback done); - /** - * rpc ReleaseSplitOrMergeLockAndRollback(.hbase.pb.ReleaseSplitOrMergeLockAndRollbackRequest) returns (.hbase.pb.ReleaseSplitOrMergeLockAndRollbackResponse); - * - *
<pre>
-     **
-     * Release lock and rollback state.
-     * </pre>
- */ - public abstract void releaseSplitOrMergeLockAndRollback( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request, - com.google.protobuf.RpcCallback done); - /** * rpc Normalize(.hbase.pb.NormalizeRequest) returns (.hbase.pb.NormalizeResponse); * @@ -62989,156 +62183,151 @@ public final class MasterProtos { done)); return; case 26: - this.releaseSplitOrMergeLockAndRollback(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest)request, - com.google.protobuf.RpcUtil.specializeCallback( - done)); - return; - case 27: this.normalize(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 28: + case 27: this.setNormalizerRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 29: + case 28: this.isNormalizerEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 30: + case 29: this.runCatalogScan(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 31: + case 30: this.enableCatalogJanitor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 32: + case 31: this.isCatalogJanitorEnabled(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 33: + case 32: this.execMasterService(controller, (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 34: + case 33: this.snapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 35: + case 34: this.getCompletedSnapshots(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 36: + case 35: this.deleteSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 37: + case 36: this.isSnapshotDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 38: + case 37: this.restoreSnapshot(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 39: + case 38: this.execProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request, com.google.protobuf.RpcUtil.specializeCallback( done)); return; - case 40: + case 39: this.execProcedureWithRet(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest)request, 
          com.google.protobuf.RpcUtil.specializeCallback(
            done));
        return;
-      case 41:
+      case 40:
        this.isProcedureDone(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest)request,
          com.google.protobuf.RpcUtil.specializeCallback(
            done));
        return;
-      case 42:
+      case 41:
        this.modifyNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest)request,
          com.google.protobuf.RpcUtil.specializeCallback(
            done));
        return;
-      case 43:
+      case 42:
        this.createNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest)request,
          com.google.protobuf.RpcUtil.specializeCallback(
            done));
        return;
-      case 44:
+      case 43:
        this.deleteNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest)request,
          com.google.protobuf.RpcUtil.specializeCallback(
            done));
        return;
-      case 45:
+      case 44:
        this.getNamespaceDescriptor(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest)request,
          com.google.protobuf.RpcUtil.specializeCallback(
            done));
        return;
-      case 46:
+      case 45:
        this.listNamespaceDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest)request,
          com.google.protobuf.RpcUtil.specializeCallback(
            done));
        return;
-      case 47:
+      case 46:
        this.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request,
          com.google.protobuf.RpcUtil.specializeCallback(
            done));
        return;
-      case 48:
+      case 47:
        this.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request,
          com.google.protobuf.RpcUtil.specializeCallback(
            done));
        return;
-      case 49:
+      case 48:
        this.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request,
          com.google.protobuf.RpcUtil.specializeCallback(
            done));
        return;
-      case 50:
+      case 49:
        this.setQuota(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest)request,
          com.google.protobuf.RpcUtil.specializeCallback(
            done));
        return;
-      case 51:
+      case 50:
        this.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request,
          com.google.protobuf.RpcUtil.specializeCallback(
            done));
        return;
-      case 52:
+      case 51:
        this.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request,
          com.google.protobuf.RpcUtil.specializeCallback(
            done));
        return;
-      case 53:
+      case 52:
        this.getProcedureResult(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest)request,
          com.google.protobuf.RpcUtil.specializeCallback(
            done));
        return;
-      case 54:
+      case 53:
        this.getSecurityCapabilities(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest)request,
          com.google.protobuf.RpcUtil.specializeCallback(
            done));
        return;
-      case 55:
+      case 54:
        this.abortProcedure(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest)request,
          com.google.protobuf.RpcUtil.specializeCallback(
            done));
        return;
-      case 56:
+      case 55:
        this.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request,
          com.google.protobuf.RpcUtil.specializeCallback(
            done));
@@ -63210,66 +62399,64 @@ public final class MasterProtos {
      case 25:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest.getDefaultInstance();
      case 26:
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest.getDefaultInstance();
-      case 27:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest.getDefaultInstance();
-      case 28:
+      case 27:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest.getDefaultInstance();
-      case 29:
+      case 28:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest.getDefaultInstance();
-      case 30:
+      case 29:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest.getDefaultInstance();
-      case 31:
+      case 30:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest.getDefaultInstance();
-      case 32:
+      case 31:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest.getDefaultInstance();
-      case 33:
+      case 32:
        return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest.getDefaultInstance();
-      case 34:
+      case 33:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest.getDefaultInstance();
-      case 35:
+      case 34:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest.getDefaultInstance();
-      case 36:
+      case 35:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest.getDefaultInstance();
-      case 37:
+      case 36:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest.getDefaultInstance();
-      case 38:
+      case 37:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest.getDefaultInstance();
+      case 38:
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
      case 39:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
      case 40:
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest.getDefaultInstance();
-      case 41:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest.getDefaultInstance();
-      case 42:
+      case 41:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest.getDefaultInstance();
-      case 43:
+      case 42:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest.getDefaultInstance();
-      case 44:
+      case 43:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest.getDefaultInstance();
-      case 45:
+      case 44:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest.getDefaultInstance();
-      case 46:
+      case 45:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest.getDefaultInstance();
-      case 47:
+      case 46:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance();
-      case 48:
+      case 47:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
-      case 49:
+      case 48:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance();
-      case 50:
+      case 49:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest.getDefaultInstance();
-      case 51:
+      case 50:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
-      case 52:
+      case 51:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
-      case 53:
+      case 52:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest.getDefaultInstance();
-      case 54:
+      case 53:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest.getDefaultInstance();
-      case 55:
+      case 54:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
-      case 56:
+      case 55:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
      default:
        throw new java.lang.AssertionError("Can't get here.");
@@ -63338,66 +62525,64 @@ public final class MasterProtos {
      case 25:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance();
      case 26:
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance();
-      case 27:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance();
-      case 28:
+      case 27:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance();
-      case 29:
+      case 28:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance();
-      case 30:
+      case 29:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance();
-      case 31:
+      case 30:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance();
-      case 32:
+      case 31:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance();
-      case 33:
+      case 32:
        return org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance();
-      case 34:
+      case 33:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance();
-      case 35:
+      case 34:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance();
-      case 36:
+      case 35:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance();
-      case 37:
+      case 36:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance();
-      case 38:
+      case 37:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance();
+      case 38:
+        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
      case 39:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
      case 40:
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance();
-      case 41:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance();
-      case 42:
+      case 41:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance();
-      case 43:
+      case 42:
        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance();
-      case 44:
+      case 43:
        return
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(); - case 45: + case 44: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(); - case 46: + case 45: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(); - case 47: + case 46: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); - case 48: + case 47: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); - case 49: + case 48: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); - case 50: + case 49: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(); + case 50: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 51: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); case 52: - return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(); - case 53: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(); - case 54: + case 53: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(); - case 55: + case 54: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(); - case 56: + case 55: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); @@ -63810,27 +62995,12 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse.getDefaultInstance())); } - public void releaseSplitOrMergeLockAndRollback( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request, - com.google.protobuf.RpcCallback done) { - channel.callMethod( - getDescriptor().getMethods().get(26), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance(), - com.google.protobuf.RpcUtil.generalizeCallback( - done, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.class, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance())); - } - public void normalize( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(27), + getDescriptor().getMethods().get(26), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance(), @@ -63845,7 +63015,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(28), + getDescriptor().getMethods().get(27), controller, request, 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance(), @@ -63860,7 +63030,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(29), + getDescriptor().getMethods().get(28), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance(), @@ -63875,7 +63045,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(30), + getDescriptor().getMethods().get(29), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance(), @@ -63890,7 +63060,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(31), + getDescriptor().getMethods().get(30), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance(), @@ -63905,7 +63075,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(32), + getDescriptor().getMethods().get(31), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance(), @@ -63920,7 +63090,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(33), + getDescriptor().getMethods().get(32), controller, request, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance(), @@ -63935,7 +63105,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(34), + getDescriptor().getMethods().get(33), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance(), @@ -63950,7 +63120,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(35), + getDescriptor().getMethods().get(34), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance(), @@ -63965,7 +63135,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(36), + getDescriptor().getMethods().get(35), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance(), @@ -63980,7 +63150,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest 
request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(37), + getDescriptor().getMethods().get(36), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance(), @@ -63995,7 +63165,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(38), + getDescriptor().getMethods().get(37), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance(), @@ -64010,7 +63180,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(39), + getDescriptor().getMethods().get(38), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(), @@ -64025,7 +63195,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(40), + getDescriptor().getMethods().get(39), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance(), @@ -64040,7 +63210,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(41), + getDescriptor().getMethods().get(40), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance(), @@ -64055,7 +63225,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(42), + getDescriptor().getMethods().get(41), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance(), @@ -64070,7 +63240,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(43), + getDescriptor().getMethods().get(42), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance(), @@ -64085,7 +63255,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(44), + getDescriptor().getMethods().get(43), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance(), @@ -64100,7 +63270,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(45), + getDescriptor().getMethods().get(44), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance(), @@ 
-64115,7 +63285,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(46), + getDescriptor().getMethods().get(45), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance(), @@ -64130,7 +63300,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(47), + getDescriptor().getMethods().get(46), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(), @@ -64145,7 +63315,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(48), + getDescriptor().getMethods().get(47), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(), @@ -64160,7 +63330,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(49), + getDescriptor().getMethods().get(48), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(), @@ -64175,7 +63345,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(50), + getDescriptor().getMethods().get(49), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance(), @@ -64190,7 +63360,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(51), + getDescriptor().getMethods().get(50), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(), @@ -64205,7 +63375,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(52), + getDescriptor().getMethods().get(51), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(), @@ -64220,7 +63390,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(53), + getDescriptor().getMethods().get(52), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance(), @@ -64235,7 +63405,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request, com.google.protobuf.RpcCallback 
done) { channel.callMethod( - getDescriptor().getMethods().get(54), + getDescriptor().getMethods().get(53), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance(), @@ -64250,7 +63420,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(55), + getDescriptor().getMethods().get(54), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance(), @@ -64265,7 +63435,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request, com.google.protobuf.RpcCallback done) { channel.callMethod( - getDescriptor().getMethods().get(56), + getDescriptor().getMethods().get(55), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance(), @@ -64412,11 +63582,6 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest request) throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse releaseSplitOrMergeLockAndRollback( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request) - throws com.google.protobuf.ServiceException; - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse normalize( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request) @@ -64887,24 +64052,12 @@ public final class MasterProtos { } - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse releaseSplitOrMergeLockAndRollback( - com.google.protobuf.RpcController controller, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request) - throws com.google.protobuf.ServiceException { - return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(26), - controller, - request, - org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse.getDefaultInstance()); - } - - public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse normalize( com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(27), + getDescriptor().getMethods().get(26), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.NormalizeResponse.getDefaultInstance()); @@ -64916,7 +64069,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(28), + getDescriptor().getMethods().get(27), controller, 
request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse.getDefaultInstance()); @@ -64928,7 +64081,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(29), + getDescriptor().getMethods().get(28), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse.getDefaultInstance()); @@ -64940,7 +64093,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(30), + getDescriptor().getMethods().get(29), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse.getDefaultInstance()); @@ -64952,7 +64105,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(31), + getDescriptor().getMethods().get(30), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse.getDefaultInstance()); @@ -64964,7 +64117,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(32), + getDescriptor().getMethods().get(31), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse.getDefaultInstance()); @@ -64976,7 +64129,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(33), + getDescriptor().getMethods().get(32), controller, request, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse.getDefaultInstance()); @@ -64988,7 +64141,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(34), + getDescriptor().getMethods().get(33), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse.getDefaultInstance()); @@ -65000,7 +64153,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse) channel.callBlockingMethod( - 
getDescriptor().getMethods().get(35), + getDescriptor().getMethods().get(34), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse.getDefaultInstance()); @@ -65012,7 +64165,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(36), + getDescriptor().getMethods().get(35), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse.getDefaultInstance()); @@ -65024,7 +64177,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(37), + getDescriptor().getMethods().get(36), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse.getDefaultInstance()); @@ -65036,7 +64189,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(38), + getDescriptor().getMethods().get(37), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse.getDefaultInstance()); @@ -65048,7 +64201,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(39), + getDescriptor().getMethods().get(38), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()); @@ -65060,7 +64213,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(40), + getDescriptor().getMethods().get(39), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse.getDefaultInstance()); @@ -65072,7 +64225,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(41), + getDescriptor().getMethods().get(40), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse.getDefaultInstance()); @@ -65084,7 +64237,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse) 
channel.callBlockingMethod( - getDescriptor().getMethods().get(42), + getDescriptor().getMethods().get(41), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse.getDefaultInstance()); @@ -65096,7 +64249,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(43), + getDescriptor().getMethods().get(42), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse.getDefaultInstance()); @@ -65108,7 +64261,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(44), + getDescriptor().getMethods().get(43), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse.getDefaultInstance()); @@ -65120,7 +64273,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(45), + getDescriptor().getMethods().get(44), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse.getDefaultInstance()); @@ -65132,7 +64285,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(46), + getDescriptor().getMethods().get(45), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse.getDefaultInstance()); @@ -65144,7 +64297,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(47), + getDescriptor().getMethods().get(46), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance()); @@ -65156,7 +64309,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(48), + getDescriptor().getMethods().get(47), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance()); @@ -65168,7 +64321,7 @@ public final class MasterProtos { 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(49), + getDescriptor().getMethods().get(48), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()); @@ -65180,7 +64333,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(50), + getDescriptor().getMethods().get(49), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse.getDefaultInstance()); @@ -65192,7 +64345,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(51), + getDescriptor().getMethods().get(50), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); @@ -65204,7 +64357,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(52), + getDescriptor().getMethods().get(51), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()); @@ -65216,7 +64369,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(53), + getDescriptor().getMethods().get(52), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetProcedureResultResponse.getDefaultInstance()); @@ -65228,7 +64381,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(54), + getDescriptor().getMethods().get(53), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.getDefaultInstance()); @@ -65240,7 +64393,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(55), + getDescriptor().getMethods().get(54), controller, request, 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance()); @@ -65252,7 +64405,7 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest request) throws com.google.protobuf.ServiceException { return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse) channel.callBlockingMethod( - getDescriptor().getMethods().get(56), + getDescriptor().getMethods().get(55), controller, request, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()); @@ -65543,16 +64696,6 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_NormalizeRequest_descriptor; private static @@ -65920,244 +65063,238 @@ public final class MasterProtos { "us\030\002 \001(\010\"8\n\032SetBalancerRunningResponse\022\032" + "\n\022prev_balance_value\030\001 \001(\010\"\032\n\030IsBalancer" + "EnabledRequest\",\n\031IsBalancerEnabledRespo" + - "nse\022\017\n\007enabled\030\001 \002(\010\"\212\001\n\035SetSplitOrMerge" + - "EnabledRequest\022\017\n\007enabled\030\001 \002(\010\022\023\n\013synch" + - "ronous\030\002 \001(\010\0220\n\014switch_types\030\003 \003(\0162\032.hba" + - "se.pb.MasterSwitchType\022\021\n\tskip_lock\030\004 \001(" + - "\010\"4\n\036SetSplitOrMergeEnabledResponse\022\022\n\np" + - "rev_value\030\001 \003(\010\"O\n\034IsSplitOrMergeEnabled" + - "Request\022/\n\013switch_type\030\001 \002(\0162\032.hbase.pb.", - "MasterSwitchType\"0\n\035IsSplitOrMergeEnable" + - "dResponse\022\017\n\007enabled\030\001 \002(\010\"+\n)ReleaseSpl" + - "itOrMergeLockAndRollbackRequest\",\n*Relea" + - "seSplitOrMergeLockAndRollbackResponse\"\022\n" + - "\020NormalizeRequest\"+\n\021NormalizeResponse\022\026" + - "\n\016normalizer_ran\030\001 \002(\010\")\n\033SetNormalizerR" + - "unningRequest\022\n\n\002on\030\001 \002(\010\"=\n\034SetNormaliz" + - "erRunningResponse\022\035\n\025prev_normalizer_val" + - "ue\030\001 \001(\010\"\034\n\032IsNormalizerEnabledRequest\"." 
+ - "\n\033IsNormalizerEnabledResponse\022\017\n\007enabled", - "\030\001 \002(\010\"\027\n\025RunCatalogScanRequest\"-\n\026RunCa" + - "talogScanResponse\022\023\n\013scan_result\030\001 \001(\005\"-" + - "\n\033EnableCatalogJanitorRequest\022\016\n\006enable\030" + - "\001 \002(\010\"2\n\034EnableCatalogJanitorResponse\022\022\n" + - "\nprev_value\030\001 \001(\010\" \n\036IsCatalogJanitorEna" + - "bledRequest\"0\n\037IsCatalogJanitorEnabledRe" + - "sponse\022\r\n\005value\030\001 \002(\010\"B\n\017SnapshotRequest" + - "\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.SnapshotDe" + - "scription\",\n\020SnapshotResponse\022\030\n\020expecte" + - "d_timeout\030\001 \002(\003\"\036\n\034GetCompletedSnapshots", - "Request\"Q\n\035GetCompletedSnapshotsResponse" + - "\0220\n\tsnapshots\030\001 \003(\0132\035.hbase.pb.SnapshotD" + - "escription\"H\n\025DeleteSnapshotRequest\022/\n\010s" + - "napshot\030\001 \002(\0132\035.hbase.pb.SnapshotDescrip" + - "tion\"\030\n\026DeleteSnapshotResponse\"s\n\026Restor" + - "eSnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hba" + - "se.pb.SnapshotDescription\022\026\n\013nonce_group" + - "\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027RestoreSn" + - "apshotResponse\022\017\n\007proc_id\030\001 \002(\004\"H\n\025IsSna" + - "pshotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.hba", - "se.pb.SnapshotDescription\"^\n\026IsSnapshotD" + - "oneResponse\022\023\n\004done\030\001 \001(\010:\005false\022/\n\010snap" + - "shot\030\002 \001(\0132\035.hbase.pb.SnapshotDescriptio" + - "n\"O\n\034IsRestoreSnapshotDoneRequest\022/\n\010sna" + - "pshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescripti" + - "on\"4\n\035IsRestoreSnapshotDoneResponse\022\023\n\004d" + - "one\030\001 \001(\010:\005false\"F\n\033GetSchemaAlterStatus" + - "Request\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.T" + - "ableName\"T\n\034GetSchemaAlterStatusResponse" + - "\022\035\n\025yet_to_update_regions\030\001 \001(\r\022\025\n\rtotal", - "_regions\030\002 \001(\r\"\213\001\n\032GetTableDescriptorsRe" + - "quest\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb.Ta" + - "bleName\022\r\n\005regex\030\002 \001(\t\022!\n\022include_sys_ta" + - "bles\030\003 \001(\010:\005false\022\021\n\tnamespace\030\004 \001(\t\"J\n\033" + - "GetTableDescriptorsResponse\022+\n\014table_sch" + - "ema\030\001 \003(\0132\025.hbase.pb.TableSchema\"[\n\024GetT" + - "ableNamesRequest\022\r\n\005regex\030\001 \001(\t\022!\n\022inclu" + - "de_sys_tables\030\002 \001(\010:\005false\022\021\n\tnamespace\030" + - "\003 \001(\t\"A\n\025GetTableNamesResponse\022(\n\013table_" + - "names\030\001 \003(\0132\023.hbase.pb.TableName\"?\n\024GetT", - "ableStateRequest\022\'\n\ntable_name\030\001 \002(\0132\023.h" + - "base.pb.TableName\"B\n\025GetTableStateRespon" + - "se\022)\n\013table_state\030\001 \002(\0132\024.hbase.pb.Table" + - "State\"\031\n\027GetClusterStatusRequest\"K\n\030GetC" + - "lusterStatusResponse\022/\n\016cluster_status\030\001" + - " \002(\0132\027.hbase.pb.ClusterStatus\"\030\n\026IsMaste" + - "rRunningRequest\"4\n\027IsMasterRunningRespon" + - "se\022\031\n\021is_master_running\030\001 \002(\010\"I\n\024ExecPro" + - "cedureRequest\0221\n\tprocedure\030\001 \002(\0132\036.hbase" + - ".pb.ProcedureDescription\"F\n\025ExecProcedur", - "eResponse\022\030\n\020expected_timeout\030\001 \001(\003\022\023\n\013r" + - "eturn_data\030\002 \001(\014\"K\n\026IsProcedureDoneReque" + - "st\0221\n\tprocedure\030\001 \001(\0132\036.hbase.pb.Procedu" + - 
"reDescription\"`\n\027IsProcedureDoneResponse" + - "\022\023\n\004done\030\001 \001(\010:\005false\0220\n\010snapshot\030\002 \001(\0132" + - "\036.hbase.pb.ProcedureDescription\",\n\031GetPr" + - "ocedureResultRequest\022\017\n\007proc_id\030\001 \002(\004\"\371\001" + - "\n\032GetProcedureResultResponse\0229\n\005state\030\001 " + - "\002(\0162*.hbase.pb.GetProcedureResultRespons" + - "e.State\022\022\n\nstart_time\030\002 \001(\004\022\023\n\013last_upda", - "te\030\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224\n\texception\030\005 " + - "\001(\0132!.hbase.pb.ForeignExceptionMessage\"1" + - "\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n\010F" + - "INISHED\020\002\"M\n\025AbortProcedureRequest\022\017\n\007pr" + - "oc_id\030\001 \002(\004\022#\n\025mayInterruptIfRunning\030\002 \001" + - "(\010:\004true\"6\n\026AbortProcedureResponse\022\034\n\024is" + - "_procedure_aborted\030\001 \002(\010\"\027\n\025ListProcedur" + - "esRequest\"@\n\026ListProceduresResponse\022&\n\tp" + - "rocedure\030\001 \003(\0132\023.hbase.pb.Procedure\"\315\001\n\017" + - "SetQuotaRequest\022\021\n\tuser_name\030\001 \001(\t\022\022\n\nus", - "er_group\030\002 \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\'\n\ntab" + - "le_name\030\004 \001(\0132\023.hbase.pb.TableName\022\022\n\nre" + - "move_all\030\005 \001(\010\022\026\n\016bypass_globals\030\006 \001(\010\022+" + - "\n\010throttle\030\007 \001(\0132\031.hbase.pb.ThrottleRequ" + - "est\"\022\n\020SetQuotaResponse\"J\n\037MajorCompacti" + - "onTimestampRequest\022\'\n\ntable_name\030\001 \002(\0132\023" + - ".hbase.pb.TableName\"U\n(MajorCompactionTi" + - "mestampForRegionRequest\022)\n\006region\030\001 \002(\0132" + - "\031.hbase.pb.RegionSpecifier\"@\n MajorCompa" + - "ctionTimestampResponse\022\034\n\024compaction_tim", - "estamp\030\001 \002(\003\"\035\n\033SecurityCapabilitiesRequ" + - "est\"\354\001\n\034SecurityCapabilitiesResponse\022G\n\014" + - "capabilities\030\001 \003(\01621.hbase.pb.SecurityCa" + - "pabilitiesResponse.Capability\"\202\001\n\nCapabi" + - "lity\022\031\n\025SIMPLE_AUTHENTICATION\020\000\022\031\n\025SECUR" + - "E_AUTHENTICATION\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n" + - "\022CELL_AUTHORIZATION\020\003\022\023\n\017CELL_VISIBILITY" + - "\020\004*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005MER" + - "GE\020\0012\373(\n\rMasterService\022e\n\024GetSchemaAlter" + - "Status\022%.hbase.pb.GetSchemaAlterStatusRe", - "quest\032&.hbase.pb.GetSchemaAlterStatusRes" + - "ponse\022b\n\023GetTableDescriptors\022$.hbase.pb." + - "GetTableDescriptorsRequest\032%.hbase.pb.Ge" + - "tTableDescriptorsResponse\022P\n\rGetTableNam" + - "es\022\036.hbase.pb.GetTableNamesRequest\032\037.hba" + - "se.pb.GetTableNamesResponse\022Y\n\020GetCluste" + - "rStatus\022!.hbase.pb.GetClusterStatusReque" + - "st\032\".hbase.pb.GetClusterStatusResponse\022V" + - "\n\017IsMasterRunning\022 .hbase.pb.IsMasterRun" + - "ningRequest\032!.hbase.pb.IsMasterRunningRe", - "sponse\022D\n\tAddColumn\022\032.hbase.pb.AddColumn" + - "Request\032\033.hbase.pb.AddColumnResponse\022M\n\014" + - "DeleteColumn\022\035.hbase.pb.DeleteColumnRequ" + - "est\032\036.hbase.pb.DeleteColumnResponse\022M\n\014M" + - "odifyColumn\022\035.hbase.pb.ModifyColumnReque" + - "st\032\036.hbase.pb.ModifyColumnResponse\022G\n\nMo" + - "veRegion\022\033.hbase.pb.MoveRegionRequest\032\034." 
+ - "hbase.pb.MoveRegionResponse\022k\n\026DispatchM" + - "ergingRegions\022\'.hbase.pb.DispatchMerging" + - "RegionsRequest\032(.hbase.pb.DispatchMergin", - "gRegionsResponse\022M\n\014AssignRegion\022\035.hbase" + - ".pb.AssignRegionRequest\032\036.hbase.pb.Assig" + - "nRegionResponse\022S\n\016UnassignRegion\022\037.hbas" + - "e.pb.UnassignRegionRequest\032 .hbase.pb.Un" + - "assignRegionResponse\022P\n\rOfflineRegion\022\036." + - "hbase.pb.OfflineRegionRequest\032\037.hbase.pb" + - ".OfflineRegionResponse\022J\n\013DeleteTable\022\034." + - "hbase.pb.DeleteTableRequest\032\035.hbase.pb.D" + - "eleteTableResponse\022P\n\rtruncateTable\022\036.hb" + - "ase.pb.TruncateTableRequest\032\037.hbase.pb.T", - "runcateTableResponse\022J\n\013EnableTable\022\034.hb" + - "ase.pb.EnableTableRequest\032\035.hbase.pb.Ena" + - "bleTableResponse\022M\n\014DisableTable\022\035.hbase" + - ".pb.DisableTableRequest\032\036.hbase.pb.Disab" + - "leTableResponse\022J\n\013ModifyTable\022\034.hbase.p" + - "b.ModifyTableRequest\032\035.hbase.pb.ModifyTa" + - "bleResponse\022J\n\013CreateTable\022\034.hbase.pb.Cr" + - "eateTableRequest\032\035.hbase.pb.CreateTableR" + - "esponse\022A\n\010Shutdown\022\031.hbase.pb.ShutdownR" + - "equest\032\032.hbase.pb.ShutdownResponse\022G\n\nSt", - "opMaster\022\033.hbase.pb.StopMasterRequest\032\034." + - "hbase.pb.StopMasterResponse\022>\n\007Balance\022\030" + - ".hbase.pb.BalanceRequest\032\031.hbase.pb.Bala" + - "nceResponse\022_\n\022SetBalancerRunning\022#.hbas" + - "e.pb.SetBalancerRunningRequest\032$.hbase.p" + - "b.SetBalancerRunningResponse\022\\\n\021IsBalanc" + - "erEnabled\022\".hbase.pb.IsBalancerEnabledRe" + - "quest\032#.hbase.pb.IsBalancerEnabledRespon" + - "se\022k\n\026SetSplitOrMergeEnabled\022\'.hbase.pb." + - "SetSplitOrMergeEnabledRequest\032(.hbase.pb", - ".SetSplitOrMergeEnabledResponse\022h\n\025IsSpl" + - "itOrMergeEnabled\022&.hbase.pb.IsSplitOrMer" + - "geEnabledRequest\032\'.hbase.pb.IsSplitOrMer" + - "geEnabledResponse\022\217\001\n\"ReleaseSplitOrMerg" + - "eLockAndRollback\0223.hbase.pb.ReleaseSplit" + - "OrMergeLockAndRollbackRequest\0324.hbase.pb" + - ".ReleaseSplitOrMergeLockAndRollbackRespo" + - "nse\022D\n\tNormalize\022\032.hbase.pb.NormalizeReq" + - "uest\032\033.hbase.pb.NormalizeResponse\022e\n\024Set" + - "NormalizerRunning\022%.hbase.pb.SetNormaliz", - "erRunningRequest\032&.hbase.pb.SetNormalize" + - "rRunningResponse\022b\n\023IsNormalizerEnabled\022" + - "$.hbase.pb.IsNormalizerEnabledRequest\032%." + - "hbase.pb.IsNormalizerEnabledResponse\022S\n\016" + - "RunCatalogScan\022\037.hbase.pb.RunCatalogScan" + - "Request\032 .hbase.pb.RunCatalogScanRespons" + - "e\022e\n\024EnableCatalogJanitor\022%.hbase.pb.Ena" + - "bleCatalogJanitorRequest\032&.hbase.pb.Enab" + - "leCatalogJanitorResponse\022n\n\027IsCatalogJan" + - "itorEnabled\022(.hbase.pb.IsCatalogJanitorE", - "nabledRequest\032).hbase.pb.IsCatalogJanito" + - "rEnabledResponse\022^\n\021ExecMasterService\022#." + - "hbase.pb.CoprocessorServiceRequest\032$.hba" + - "se.pb.CoprocessorServiceResponse\022A\n\010Snap" + - "shot\022\031.hbase.pb.SnapshotRequest\032\032.hbase." + - "pb.SnapshotResponse\022h\n\025GetCompletedSnaps" + - "hots\022&.hbase.pb.GetCompletedSnapshotsReq" + - "uest\032\'.hbase.pb.GetCompletedSnapshotsRes" + - "ponse\022S\n\016DeleteSnapshot\022\037.hbase.pb.Delet" + - "eSnapshotRequest\032 .hbase.pb.DeleteSnapsh", - "otResponse\022S\n\016IsSnapshotDone\022\037.hbase.pb." 
+ - "IsSnapshotDoneRequest\032 .hbase.pb.IsSnaps" + - "hotDoneResponse\022V\n\017RestoreSnapshot\022 .hba" + - "se.pb.RestoreSnapshotRequest\032!.hbase.pb." + - "RestoreSnapshotResponse\022P\n\rExecProcedure" + - "\022\036.hbase.pb.ExecProcedureRequest\032\037.hbase" + - ".pb.ExecProcedureResponse\022W\n\024ExecProcedu" + - "reWithRet\022\036.hbase.pb.ExecProcedureReques" + - "t\032\037.hbase.pb.ExecProcedureResponse\022V\n\017Is" + - "ProcedureDone\022 .hbase.pb.IsProcedureDone", - "Request\032!.hbase.pb.IsProcedureDoneRespon" + - "se\022V\n\017ModifyNamespace\022 .hbase.pb.ModifyN" + - "amespaceRequest\032!.hbase.pb.ModifyNamespa" + - "ceResponse\022V\n\017CreateNamespace\022 .hbase.pb" + - ".CreateNamespaceRequest\032!.hbase.pb.Creat" + - "eNamespaceResponse\022V\n\017DeleteNamespace\022 ." + - "hbase.pb.DeleteNamespaceRequest\032!.hbase." + - "pb.DeleteNamespaceResponse\022k\n\026GetNamespa" + - "ceDescriptor\022\'.hbase.pb.GetNamespaceDesc" + - "riptorRequest\032(.hbase.pb.GetNamespaceDes", - "criptorResponse\022q\n\030ListNamespaceDescript" + - "ors\022).hbase.pb.ListNamespaceDescriptorsR" + - "equest\032*.hbase.pb.ListNamespaceDescripto" + - "rsResponse\022\206\001\n\037ListTableDescriptorsByNam" + - "espace\0220.hbase.pb.ListTableDescriptorsBy" + - "NamespaceRequest\0321.hbase.pb.ListTableDes" + - "criptorsByNamespaceResponse\022t\n\031ListTable" + - "NamesByNamespace\022*.hbase.pb.ListTableNam" + - "esByNamespaceRequest\032+.hbase.pb.ListTabl" + - "eNamesByNamespaceResponse\022P\n\rGetTableSta", - "te\022\036.hbase.pb.GetTableStateRequest\032\037.hba" + - "se.pb.GetTableStateResponse\022A\n\010SetQuota\022" + - "\031.hbase.pb.SetQuotaRequest\032\032.hbase.pb.Se" + - "tQuotaResponse\022x\n\037getLastMajorCompaction" + - "Timestamp\022).hbase.pb.MajorCompactionTime" + - "stampRequest\032*.hbase.pb.MajorCompactionT" + - "imestampResponse\022\212\001\n(getLastMajorCompact" + - "ionTimestampForRegion\0222.hbase.pb.MajorCo" + - "mpactionTimestampForRegionRequest\032*.hbas" + - "e.pb.MajorCompactionTimestampResponse\022_\n", - "\022getProcedureResult\022#.hbase.pb.GetProced" + - "ureResultRequest\032$.hbase.pb.GetProcedure" + - "ResultResponse\022h\n\027getSecurityCapabilitie" + - "s\022%.hbase.pb.SecurityCapabilitiesRequest" + - "\032&.hbase.pb.SecurityCapabilitiesResponse" + - "\022S\n\016AbortProcedure\022\037.hbase.pb.AbortProce" + - "dureRequest\032 .hbase.pb.AbortProcedureRes" + - "ponse\022S\n\016ListProcedures\022\037.hbase.pb.ListP" + - "roceduresRequest\032 .hbase.pb.ListProcedur" + - "esResponseBB\n*org.apache.hadoop.hbase.pr", - "otobuf.generatedB\014MasterProtosH\001\210\001\001\240\001\001" + "nse\022\017\n\007enabled\030\001 \002(\010\"w\n\035SetSplitOrMergeE" + + "nabledRequest\022\017\n\007enabled\030\001 \002(\010\022\023\n\013synchr" + + "onous\030\002 \001(\010\0220\n\014switch_types\030\003 \003(\0162\032.hbas" + + "e.pb.MasterSwitchType\"4\n\036SetSplitOrMerge" + + "EnabledResponse\022\022\n\nprev_value\030\001 \003(\010\"O\n\034I" + + "sSplitOrMergeEnabledRequest\022/\n\013switch_ty" + + "pe\030\001 \002(\0162\032.hbase.pb.MasterSwitchType\"0\n\035", + "IsSplitOrMergeEnabledResponse\022\017\n\007enabled" + + "\030\001 \002(\010\"\022\n\020NormalizeRequest\"+\n\021NormalizeR" + + "esponse\022\026\n\016normalizer_ran\030\001 \002(\010\")\n\033SetNo" + + "rmalizerRunningRequest\022\n\n\002on\030\001 \002(\010\"=\n\034Se" + + "tNormalizerRunningResponse\022\035\n\025prev_norma" + + "lizer_value\030\001 \001(\010\"\034\n\032IsNormalizerEnabled" + + 
"Request\".\n\033IsNormalizerEnabledResponse\022\017" + + "\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalogScanRequest" + + "\"-\n\026RunCatalogScanResponse\022\023\n\013scan_resul" + + "t\030\001 \001(\005\"-\n\033EnableCatalogJanitorRequest\022\016", + "\n\006enable\030\001 \002(\010\"2\n\034EnableCatalogJanitorRe" + + "sponse\022\022\n\nprev_value\030\001 \001(\010\" \n\036IsCatalogJ" + + "anitorEnabledRequest\"0\n\037IsCatalogJanitor" + + "EnabledResponse\022\r\n\005value\030\001 \002(\010\"B\n\017Snapsh" + + "otRequest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.S" + + "napshotDescription\",\n\020SnapshotResponse\022\030" + + "\n\020expected_timeout\030\001 \002(\003\"\036\n\034GetCompleted" + + "SnapshotsRequest\"Q\n\035GetCompletedSnapshot" + + "sResponse\0220\n\tsnapshots\030\001 \003(\0132\035.hbase.pb." + + "SnapshotDescription\"H\n\025DeleteSnapshotReq", + "uest\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.Snapsh" + + "otDescription\"\030\n\026DeleteSnapshotResponse\"" + + "s\n\026RestoreSnapshotRequest\022/\n\010snapshot\030\001 " + + "\002(\0132\035.hbase.pb.SnapshotDescription\022\026\n\013no" + + "nce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027" + + "RestoreSnapshotResponse\022\017\n\007proc_id\030\001 \002(\004" + + "\"H\n\025IsSnapshotDoneRequest\022/\n\010snapshot\030\001 " + + "\001(\0132\035.hbase.pb.SnapshotDescription\"^\n\026Is" + + "SnapshotDoneResponse\022\023\n\004done\030\001 \001(\010:\005fals" + + "e\022/\n\010snapshot\030\002 \001(\0132\035.hbase.pb.SnapshotD", + "escription\"O\n\034IsRestoreSnapshotDoneReque" + + "st\022/\n\010snapshot\030\001 \001(\0132\035.hbase.pb.Snapshot" + + "Description\"4\n\035IsRestoreSnapshotDoneResp" + + "onse\022\023\n\004done\030\001 \001(\010:\005false\"F\n\033GetSchemaAl" + + "terStatusRequest\022\'\n\ntable_name\030\001 \002(\0132\023.h" + + "base.pb.TableName\"T\n\034GetSchemaAlterStatu" + + "sResponse\022\035\n\025yet_to_update_regions\030\001 \001(\r" + + "\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032GetTableDesc" + + "riptorsRequest\022(\n\013table_names\030\001 \003(\0132\023.hb" + + "ase.pb.TableName\022\r\n\005regex\030\002 \001(\t\022!\n\022inclu", + "de_sys_tables\030\003 \001(\010:\005false\022\021\n\tnamespace\030" + + "\004 \001(\t\"J\n\033GetTableDescriptorsResponse\022+\n\014" + + "table_schema\030\001 \003(\0132\025.hbase.pb.TableSchem" + + "a\"[\n\024GetTableNamesRequest\022\r\n\005regex\030\001 \001(\t" + + "\022!\n\022include_sys_tables\030\002 \001(\010:\005false\022\021\n\tn" + + "amespace\030\003 \001(\t\"A\n\025GetTableNamesResponse\022" + + "(\n\013table_names\030\001 \003(\0132\023.hbase.pb.TableNam" + + "e\"?\n\024GetTableStateRequest\022\'\n\ntable_name\030" + + "\001 \002(\0132\023.hbase.pb.TableName\"B\n\025GetTableSt" + + "ateResponse\022)\n\013table_state\030\001 \002(\0132\024.hbase", + ".pb.TableState\"\031\n\027GetClusterStatusReques" + + "t\"K\n\030GetClusterStatusResponse\022/\n\016cluster" + + "_status\030\001 \002(\0132\027.hbase.pb.ClusterStatus\"\030" + + "\n\026IsMasterRunningRequest\"4\n\027IsMasterRunn" + + "ingResponse\022\031\n\021is_master_running\030\001 \002(\010\"I" + + "\n\024ExecProcedureRequest\0221\n\tprocedure\030\001 \002(" + + "\0132\036.hbase.pb.ProcedureDescription\"F\n\025Exe" + + "cProcedureResponse\022\030\n\020expected_timeout\030\001" + + " \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n\026IsProcedure" + + "DoneRequest\0221\n\tprocedure\030\001 \001(\0132\036.hbase.p", + 
"b.ProcedureDescription\"`\n\027IsProcedureDon" + + "eResponse\022\023\n\004done\030\001 \001(\010:\005false\0220\n\010snapsh" + + "ot\030\002 \001(\0132\036.hbase.pb.ProcedureDescription" + + "\",\n\031GetProcedureResultRequest\022\017\n\007proc_id" + + "\030\001 \002(\004\"\371\001\n\032GetProcedureResultResponse\0229\n" + + "\005state\030\001 \002(\0162*.hbase.pb.GetProcedureResu" + + "ltResponse.State\022\022\n\nstart_time\030\002 \001(\004\022\023\n\013" + + "last_update\030\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224\n\texc" + + "eption\030\005 \001(\0132!.hbase.pb.ForeignException" + + "Message\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUNNI", + "NG\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortProcedureRequ" + + "est\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayInterruptIfRu" + + "nning\030\002 \001(\010:\004true\"6\n\026AbortProcedureRespo" + + "nse\022\034\n\024is_procedure_aborted\030\001 \002(\010\"\027\n\025Lis" + + "tProceduresRequest\"@\n\026ListProceduresResp" + + "onse\022&\n\tprocedure\030\001 \003(\0132\023.hbase.pb.Proce" + + "dure\"\315\001\n\017SetQuotaRequest\022\021\n\tuser_name\030\001 " + + "\001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnamespace\030\003 \001" + + "(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase.pb.TableN" + + "ame\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016bypass_global", + "s\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hbase.pb.Thr" + + "ottleRequest\"\022\n\020SetQuotaResponse\"J\n\037Majo" + + "rCompactionTimestampRequest\022\'\n\ntable_nam" + + "e\030\001 \002(\0132\023.hbase.pb.TableName\"U\n(MajorCom" + + "pactionTimestampForRegionRequest\022)\n\006regi" + + "on\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\"@\n M" + + "ajorCompactionTimestampResponse\022\034\n\024compa" + + "ction_timestamp\030\001 \002(\003\"\035\n\033SecurityCapabil" + + "itiesRequest\"\354\001\n\034SecurityCapabilitiesRes" + + "ponse\022G\n\014capabilities\030\001 \003(\01621.hbase.pb.S", + "ecurityCapabilitiesResponse.Capability\"\202" + + "\001\n\nCapability\022\031\n\025SIMPLE_AUTHENTICATION\020\000" + + "\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n\rAUTHORIZA" + + "TION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003\022\023\n\017CELL_V" + + "ISIBILITY\020\004*(\n\020MasterSwitchType\022\t\n\005SPLIT" + + "\020\000\022\t\n\005MERGE\020\0012\351\'\n\rMasterService\022e\n\024GetSc" + + "hemaAlterStatus\022%.hbase.pb.GetSchemaAlte" + + "rStatusRequest\032&.hbase.pb.GetSchemaAlter" + + "StatusResponse\022b\n\023GetTableDescriptors\022$." + + "hbase.pb.GetTableDescriptorsRequest\032%.hb", + "ase.pb.GetTableDescriptorsResponse\022P\n\rGe" + + "tTableNames\022\036.hbase.pb.GetTableNamesRequ" + + "est\032\037.hbase.pb.GetTableNamesResponse\022Y\n\020" + + "GetClusterStatus\022!.hbase.pb.GetClusterSt" + + "atusRequest\032\".hbase.pb.GetClusterStatusR" + + "esponse\022V\n\017IsMasterRunning\022 .hbase.pb.Is" + + "MasterRunningRequest\032!.hbase.pb.IsMaster" + + "RunningResponse\022D\n\tAddColumn\022\032.hbase.pb." 
+ + "AddColumnRequest\032\033.hbase.pb.AddColumnRes" + + "ponse\022M\n\014DeleteColumn\022\035.hbase.pb.DeleteC", + "olumnRequest\032\036.hbase.pb.DeleteColumnResp" + + "onse\022M\n\014ModifyColumn\022\035.hbase.pb.ModifyCo" + + "lumnRequest\032\036.hbase.pb.ModifyColumnRespo" + + "nse\022G\n\nMoveRegion\022\033.hbase.pb.MoveRegionR" + + "equest\032\034.hbase.pb.MoveRegionResponse\022k\n\026" + + "DispatchMergingRegions\022\'.hbase.pb.Dispat" + + "chMergingRegionsRequest\032(.hbase.pb.Dispa" + + "tchMergingRegionsResponse\022M\n\014AssignRegio" + + "n\022\035.hbase.pb.AssignRegionRequest\032\036.hbase" + + ".pb.AssignRegionResponse\022S\n\016UnassignRegi", + "on\022\037.hbase.pb.UnassignRegionRequest\032 .hb" + + "ase.pb.UnassignRegionResponse\022P\n\rOffline" + + "Region\022\036.hbase.pb.OfflineRegionRequest\032\037" + + ".hbase.pb.OfflineRegionResponse\022J\n\013Delet" + + "eTable\022\034.hbase.pb.DeleteTableRequest\032\035.h" + + "base.pb.DeleteTableResponse\022P\n\rtruncateT" + + "able\022\036.hbase.pb.TruncateTableRequest\032\037.h" + + "base.pb.TruncateTableResponse\022J\n\013EnableT" + + "able\022\034.hbase.pb.EnableTableRequest\032\035.hba" + + "se.pb.EnableTableResponse\022M\n\014DisableTabl", + "e\022\035.hbase.pb.DisableTableRequest\032\036.hbase" + + ".pb.DisableTableResponse\022J\n\013ModifyTable\022" + + "\034.hbase.pb.ModifyTableRequest\032\035.hbase.pb" + + ".ModifyTableResponse\022J\n\013CreateTable\022\034.hb" + + "ase.pb.CreateTableRequest\032\035.hbase.pb.Cre" + + "ateTableResponse\022A\n\010Shutdown\022\031.hbase.pb." + + "ShutdownRequest\032\032.hbase.pb.ShutdownRespo" + + "nse\022G\n\nStopMaster\022\033.hbase.pb.StopMasterR" + + "equest\032\034.hbase.pb.StopMasterResponse\022>\n\007" + + "Balance\022\030.hbase.pb.BalanceRequest\032\031.hbas", + "e.pb.BalanceResponse\022_\n\022SetBalancerRunni" + + "ng\022#.hbase.pb.SetBalancerRunningRequest\032" + + "$.hbase.pb.SetBalancerRunningResponse\022\\\n" + + "\021IsBalancerEnabled\022\".hbase.pb.IsBalancer" + + "EnabledRequest\032#.hbase.pb.IsBalancerEnab" + + "ledResponse\022k\n\026SetSplitOrMergeEnabled\022\'." + + "hbase.pb.SetSplitOrMergeEnabledRequest\032(" + + ".hbase.pb.SetSplitOrMergeEnabledResponse" + + "\022h\n\025IsSplitOrMergeEnabled\022&.hbase.pb.IsS" + + "plitOrMergeEnabledRequest\032\'.hbase.pb.IsS", + "plitOrMergeEnabledResponse\022D\n\tNormalize\022" + + "\032.hbase.pb.NormalizeRequest\032\033.hbase.pb.N" + + "ormalizeResponse\022e\n\024SetNormalizerRunning" + + "\022%.hbase.pb.SetNormalizerRunningRequest\032" + + "&.hbase.pb.SetNormalizerRunningResponse\022" + + "b\n\023IsNormalizerEnabled\022$.hbase.pb.IsNorm" + + "alizerEnabledRequest\032%.hbase.pb.IsNormal" + + "izerEnabledResponse\022S\n\016RunCatalogScan\022\037." 
+ + "hbase.pb.RunCatalogScanRequest\032 .hbase.p" + + "b.RunCatalogScanResponse\022e\n\024EnableCatalo", + "gJanitor\022%.hbase.pb.EnableCatalogJanitor" + + "Request\032&.hbase.pb.EnableCatalogJanitorR" + + "esponse\022n\n\027IsCatalogJanitorEnabled\022(.hba" + + "se.pb.IsCatalogJanitorEnabledRequest\032).h" + + "base.pb.IsCatalogJanitorEnabledResponse\022" + + "^\n\021ExecMasterService\022#.hbase.pb.Coproces" + + "sorServiceRequest\032$.hbase.pb.Coprocessor" + + "ServiceResponse\022A\n\010Snapshot\022\031.hbase.pb.S" + + "napshotRequest\032\032.hbase.pb.SnapshotRespon" + + "se\022h\n\025GetCompletedSnapshots\022&.hbase.pb.G", + "etCompletedSnapshotsRequest\032\'.hbase.pb.G" + + "etCompletedSnapshotsResponse\022S\n\016DeleteSn" + + "apshot\022\037.hbase.pb.DeleteSnapshotRequest\032" + + " .hbase.pb.DeleteSnapshotResponse\022S\n\016IsS" + + "napshotDone\022\037.hbase.pb.IsSnapshotDoneReq" + + "uest\032 .hbase.pb.IsSnapshotDoneResponse\022V" + + "\n\017RestoreSnapshot\022 .hbase.pb.RestoreSnap" + + "shotRequest\032!.hbase.pb.RestoreSnapshotRe" + + "sponse\022P\n\rExecProcedure\022\036.hbase.pb.ExecP" + + "rocedureRequest\032\037.hbase.pb.ExecProcedure", + "Response\022W\n\024ExecProcedureWithRet\022\036.hbase" + + ".pb.ExecProcedureRequest\032\037.hbase.pb.Exec" + + "ProcedureResponse\022V\n\017IsProcedureDone\022 .h" + + "base.pb.IsProcedureDoneRequest\032!.hbase.p" + + "b.IsProcedureDoneResponse\022V\n\017ModifyNames" + + "pace\022 .hbase.pb.ModifyNamespaceRequest\032!" + + ".hbase.pb.ModifyNamespaceResponse\022V\n\017Cre" + + "ateNamespace\022 .hbase.pb.CreateNamespaceR" + + "equest\032!.hbase.pb.CreateNamespaceRespons" + + "e\022V\n\017DeleteNamespace\022 .hbase.pb.DeleteNa", + "mespaceRequest\032!.hbase.pb.DeleteNamespac" + + "eResponse\022k\n\026GetNamespaceDescriptor\022\'.hb" + + "ase.pb.GetNamespaceDescriptorRequest\032(.h" + + "base.pb.GetNamespaceDescriptorResponse\022q" + + "\n\030ListNamespaceDescriptors\022).hbase.pb.Li" + + "stNamespaceDescriptorsRequest\032*.hbase.pb" + + ".ListNamespaceDescriptorsResponse\022\206\001\n\037Li" + + "stTableDescriptorsByNamespace\0220.hbase.pb" + + ".ListTableDescriptorsByNamespaceRequest\032" + + "1.hbase.pb.ListTableDescriptorsByNamespa", + "ceResponse\022t\n\031ListTableNamesByNamespace\022" + + "*.hbase.pb.ListTableNamesByNamespaceRequ" + + "est\032+.hbase.pb.ListTableNamesByNamespace" + + "Response\022P\n\rGetTableState\022\036.hbase.pb.Get" + + "TableStateRequest\032\037.hbase.pb.GetTableSta" + + "teResponse\022A\n\010SetQuota\022\031.hbase.pb.SetQuo" + + "taRequest\032\032.hbase.pb.SetQuotaResponse\022x\n" + + "\037getLastMajorCompactionTimestamp\022).hbase" + + ".pb.MajorCompactionTimestampRequest\032*.hb" + + "ase.pb.MajorCompactionTimestampResponse\022", + "\212\001\n(getLastMajorCompactionTimestampForRe" + + "gion\0222.hbase.pb.MajorCompactionTimestamp" + + "ForRegionRequest\032*.hbase.pb.MajorCompact" + + "ionTimestampResponse\022_\n\022getProcedureResu" + + "lt\022#.hbase.pb.GetProcedureResultRequest\032" + + "$.hbase.pb.GetProcedureResultResponse\022h\n" + + "\027getSecurityCapabilities\022%.hbase.pb.Secu" + + "rityCapabilitiesRequest\032&.hbase.pb.Secur" + + "ityCapabilitiesResponse\022S\n\016AbortProcedur" + + "e\022\037.hbase.pb.AbortProcedureRequest\032 .hba", + "se.pb.AbortProcedureResponse\022S\n\016ListProc" + + "edures\022\037.hbase.pb.ListProceduresRequest\032" + + " .hbase.pb.ListProceduresResponseBB\n*org" + + ".apache.hadoop.hbase.protobuf.generatedB" + + "\014MasterProtosH\001\210\001\001\240\001\001" }; 
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -66481,7 +65618,7 @@ public final class MasterProtos { internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetSplitOrMergeEnabledRequest_descriptor, - new java.lang.String[] { "Enabled", "Synchronous", "SwitchTypes", "SkipLock", }); + new java.lang.String[] { "Enabled", "Synchronous", "SwitchTypes", }); internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_descriptor = getDescriptor().getMessageTypes().get(53); internal_static_hbase_pb_SetSplitOrMergeEnabledResponse_fieldAccessorTable = new @@ -66500,332 +65637,320 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsSplitOrMergeEnabledResponse_descriptor, new java.lang.String[] { "Enabled", }); - internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor = - getDescriptor().getMessageTypes().get(56); - internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackRequest_descriptor, - new java.lang.String[] { }); - internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor = - getDescriptor().getMessageTypes().get(57); - internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_hbase_pb_ReleaseSplitOrMergeLockAndRollbackResponse_descriptor, - new java.lang.String[] { }); internal_static_hbase_pb_NormalizeRequest_descriptor = - getDescriptor().getMessageTypes().get(58); + getDescriptor().getMessageTypes().get(56); internal_static_hbase_pb_NormalizeRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_NormalizeRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_NormalizeResponse_descriptor = - getDescriptor().getMessageTypes().get(59); + getDescriptor().getMessageTypes().get(57); internal_static_hbase_pb_NormalizeResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_NormalizeResponse_descriptor, new java.lang.String[] { "NormalizerRan", }); internal_static_hbase_pb_SetNormalizerRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(60); + getDescriptor().getMessageTypes().get(58); internal_static_hbase_pb_SetNormalizerRunningRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetNormalizerRunningRequest_descriptor, new java.lang.String[] { "On", }); internal_static_hbase_pb_SetNormalizerRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(61); + getDescriptor().getMessageTypes().get(59); internal_static_hbase_pb_SetNormalizerRunningResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetNormalizerRunningResponse_descriptor, new java.lang.String[] { "PrevNormalizerValue", }); internal_static_hbase_pb_IsNormalizerEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(62); + getDescriptor().getMessageTypes().get(60); internal_static_hbase_pb_IsNormalizerEnabledRequest_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsNormalizerEnabledRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsNormalizerEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(63); + getDescriptor().getMessageTypes().get(61); internal_static_hbase_pb_IsNormalizerEnabledResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsNormalizerEnabledResponse_descriptor, new java.lang.String[] { "Enabled", }); internal_static_hbase_pb_RunCatalogScanRequest_descriptor = - getDescriptor().getMessageTypes().get(64); + getDescriptor().getMessageTypes().get(62); internal_static_hbase_pb_RunCatalogScanRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RunCatalogScanRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_RunCatalogScanResponse_descriptor = - getDescriptor().getMessageTypes().get(65); + getDescriptor().getMessageTypes().get(63); internal_static_hbase_pb_RunCatalogScanResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RunCatalogScanResponse_descriptor, new java.lang.String[] { "ScanResult", }); internal_static_hbase_pb_EnableCatalogJanitorRequest_descriptor = - getDescriptor().getMessageTypes().get(66); + getDescriptor().getMessageTypes().get(64); internal_static_hbase_pb_EnableCatalogJanitorRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_EnableCatalogJanitorRequest_descriptor, new java.lang.String[] { "Enable", }); internal_static_hbase_pb_EnableCatalogJanitorResponse_descriptor = - getDescriptor().getMessageTypes().get(67); + getDescriptor().getMessageTypes().get(65); internal_static_hbase_pb_EnableCatalogJanitorResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_EnableCatalogJanitorResponse_descriptor, new java.lang.String[] { "PrevValue", }); internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_descriptor = - getDescriptor().getMessageTypes().get(68); + getDescriptor().getMessageTypes().get(66); internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsCatalogJanitorEnabledRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_descriptor = - getDescriptor().getMessageTypes().get(69); + getDescriptor().getMessageTypes().get(67); internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsCatalogJanitorEnabledResponse_descriptor, new java.lang.String[] { "Value", }); internal_static_hbase_pb_SnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(70); + getDescriptor().getMessageTypes().get(68); internal_static_hbase_pb_SnapshotRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SnapshotRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_hbase_pb_SnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(71); + getDescriptor().getMessageTypes().get(69); internal_static_hbase_pb_SnapshotResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( 
internal_static_hbase_pb_SnapshotResponse_descriptor, new java.lang.String[] { "ExpectedTimeout", }); internal_static_hbase_pb_GetCompletedSnapshotsRequest_descriptor = - getDescriptor().getMessageTypes().get(72); + getDescriptor().getMessageTypes().get(70); internal_static_hbase_pb_GetCompletedSnapshotsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetCompletedSnapshotsRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetCompletedSnapshotsResponse_descriptor = - getDescriptor().getMessageTypes().get(73); + getDescriptor().getMessageTypes().get(71); internal_static_hbase_pb_GetCompletedSnapshotsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetCompletedSnapshotsResponse_descriptor, new java.lang.String[] { "Snapshots", }); internal_static_hbase_pb_DeleteSnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(74); + getDescriptor().getMessageTypes().get(72); internal_static_hbase_pb_DeleteSnapshotRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_DeleteSnapshotRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_hbase_pb_DeleteSnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(75); + getDescriptor().getMessageTypes().get(73); internal_static_hbase_pb_DeleteSnapshotResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_DeleteSnapshotResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_RestoreSnapshotRequest_descriptor = - getDescriptor().getMessageTypes().get(76); + getDescriptor().getMessageTypes().get(74); internal_static_hbase_pb_RestoreSnapshotRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RestoreSnapshotRequest_descriptor, new java.lang.String[] { "Snapshot", "NonceGroup", "Nonce", }); internal_static_hbase_pb_RestoreSnapshotResponse_descriptor = - getDescriptor().getMessageTypes().get(77); + getDescriptor().getMessageTypes().get(75); internal_static_hbase_pb_RestoreSnapshotResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_RestoreSnapshotResponse_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_IsSnapshotDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(78); + getDescriptor().getMessageTypes().get(76); internal_static_hbase_pb_IsSnapshotDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsSnapshotDoneRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_hbase_pb_IsSnapshotDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(79); + getDescriptor().getMessageTypes().get(77); internal_static_hbase_pb_IsSnapshotDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsSnapshotDoneResponse_descriptor, new java.lang.String[] { "Done", "Snapshot", }); internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(80); + getDescriptor().getMessageTypes().get(78); internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( 
internal_static_hbase_pb_IsRestoreSnapshotDoneRequest_descriptor, new java.lang.String[] { "Snapshot", }); internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(81); + getDescriptor().getMessageTypes().get(79); internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsRestoreSnapshotDoneResponse_descriptor, new java.lang.String[] { "Done", }); internal_static_hbase_pb_GetSchemaAlterStatusRequest_descriptor = - getDescriptor().getMessageTypes().get(82); + getDescriptor().getMessageTypes().get(80); internal_static_hbase_pb_GetSchemaAlterStatusRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetSchemaAlterStatusRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_GetSchemaAlterStatusResponse_descriptor = - getDescriptor().getMessageTypes().get(83); + getDescriptor().getMessageTypes().get(81); internal_static_hbase_pb_GetSchemaAlterStatusResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetSchemaAlterStatusResponse_descriptor, new java.lang.String[] { "YetToUpdateRegions", "TotalRegions", }); internal_static_hbase_pb_GetTableDescriptorsRequest_descriptor = - getDescriptor().getMessageTypes().get(84); + getDescriptor().getMessageTypes().get(82); internal_static_hbase_pb_GetTableDescriptorsRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableDescriptorsRequest_descriptor, new java.lang.String[] { "TableNames", "Regex", "IncludeSysTables", "Namespace", }); internal_static_hbase_pb_GetTableDescriptorsResponse_descriptor = - getDescriptor().getMessageTypes().get(85); + getDescriptor().getMessageTypes().get(83); internal_static_hbase_pb_GetTableDescriptorsResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableDescriptorsResponse_descriptor, new java.lang.String[] { "TableSchema", }); internal_static_hbase_pb_GetTableNamesRequest_descriptor = - getDescriptor().getMessageTypes().get(86); + getDescriptor().getMessageTypes().get(84); internal_static_hbase_pb_GetTableNamesRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableNamesRequest_descriptor, new java.lang.String[] { "Regex", "IncludeSysTables", "Namespace", }); internal_static_hbase_pb_GetTableNamesResponse_descriptor = - getDescriptor().getMessageTypes().get(87); + getDescriptor().getMessageTypes().get(85); internal_static_hbase_pb_GetTableNamesResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableNamesResponse_descriptor, new java.lang.String[] { "TableNames", }); internal_static_hbase_pb_GetTableStateRequest_descriptor = - getDescriptor().getMessageTypes().get(88); + getDescriptor().getMessageTypes().get(86); internal_static_hbase_pb_GetTableStateRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableStateRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_GetTableStateResponse_descriptor = - getDescriptor().getMessageTypes().get(89); + getDescriptor().getMessageTypes().get(87); internal_static_hbase_pb_GetTableStateResponse_fieldAccessorTable = new 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetTableStateResponse_descriptor, new java.lang.String[] { "TableState", }); internal_static_hbase_pb_GetClusterStatusRequest_descriptor = - getDescriptor().getMessageTypes().get(90); + getDescriptor().getMessageTypes().get(88); internal_static_hbase_pb_GetClusterStatusRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetClusterStatusRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_GetClusterStatusResponse_descriptor = - getDescriptor().getMessageTypes().get(91); + getDescriptor().getMessageTypes().get(89); internal_static_hbase_pb_GetClusterStatusResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetClusterStatusResponse_descriptor, new java.lang.String[] { "ClusterStatus", }); internal_static_hbase_pb_IsMasterRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(92); + getDescriptor().getMessageTypes().get(90); internal_static_hbase_pb_IsMasterRunningRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsMasterRunningRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_IsMasterRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(93); + getDescriptor().getMessageTypes().get(91); internal_static_hbase_pb_IsMasterRunningResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsMasterRunningResponse_descriptor, new java.lang.String[] { "IsMasterRunning", }); internal_static_hbase_pb_ExecProcedureRequest_descriptor = - getDescriptor().getMessageTypes().get(94); + getDescriptor().getMessageTypes().get(92); internal_static_hbase_pb_ExecProcedureRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ExecProcedureRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_ExecProcedureResponse_descriptor = - getDescriptor().getMessageTypes().get(95); + getDescriptor().getMessageTypes().get(93); internal_static_hbase_pb_ExecProcedureResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ExecProcedureResponse_descriptor, new java.lang.String[] { "ExpectedTimeout", "ReturnData", }); internal_static_hbase_pb_IsProcedureDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(96); + getDescriptor().getMessageTypes().get(94); internal_static_hbase_pb_IsProcedureDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsProcedureDoneRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_IsProcedureDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(97); + getDescriptor().getMessageTypes().get(95); internal_static_hbase_pb_IsProcedureDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_IsProcedureDoneResponse_descriptor, new java.lang.String[] { "Done", "Snapshot", }); internal_static_hbase_pb_GetProcedureResultRequest_descriptor = - getDescriptor().getMessageTypes().get(98); + getDescriptor().getMessageTypes().get(96); internal_static_hbase_pb_GetProcedureResultRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( 
internal_static_hbase_pb_GetProcedureResultRequest_descriptor, new java.lang.String[] { "ProcId", }); internal_static_hbase_pb_GetProcedureResultResponse_descriptor = - getDescriptor().getMessageTypes().get(99); + getDescriptor().getMessageTypes().get(97); internal_static_hbase_pb_GetProcedureResultResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_GetProcedureResultResponse_descriptor, new java.lang.String[] { "State", "StartTime", "LastUpdate", "Result", "Exception", }); internal_static_hbase_pb_AbortProcedureRequest_descriptor = - getDescriptor().getMessageTypes().get(100); + getDescriptor().getMessageTypes().get(98); internal_static_hbase_pb_AbortProcedureRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_AbortProcedureRequest_descriptor, new java.lang.String[] { "ProcId", "MayInterruptIfRunning", }); internal_static_hbase_pb_AbortProcedureResponse_descriptor = - getDescriptor().getMessageTypes().get(101); + getDescriptor().getMessageTypes().get(99); internal_static_hbase_pb_AbortProcedureResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_AbortProcedureResponse_descriptor, new java.lang.String[] { "IsProcedureAborted", }); internal_static_hbase_pb_ListProceduresRequest_descriptor = - getDescriptor().getMessageTypes().get(102); + getDescriptor().getMessageTypes().get(100); internal_static_hbase_pb_ListProceduresRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ListProceduresRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_ListProceduresResponse_descriptor = - getDescriptor().getMessageTypes().get(103); + getDescriptor().getMessageTypes().get(101); internal_static_hbase_pb_ListProceduresResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_ListProceduresResponse_descriptor, new java.lang.String[] { "Procedure", }); internal_static_hbase_pb_SetQuotaRequest_descriptor = - getDescriptor().getMessageTypes().get(104); + getDescriptor().getMessageTypes().get(102); internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetQuotaRequest_descriptor, new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", }); internal_static_hbase_pb_SetQuotaResponse_descriptor = - getDescriptor().getMessageTypes().get(105); + getDescriptor().getMessageTypes().get(103); internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SetQuotaResponse_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor = - getDescriptor().getMessageTypes().get(106); + getDescriptor().getMessageTypes().get(104); internal_static_hbase_pb_MajorCompactionTimestampRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampRequest_descriptor, new java.lang.String[] { "TableName", }); internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor = - getDescriptor().getMessageTypes().get(107); + getDescriptor().getMessageTypes().get(105); 
internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampForRegionRequest_descriptor, new java.lang.String[] { "Region", }); internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor = - getDescriptor().getMessageTypes().get(108); + getDescriptor().getMessageTypes().get(106); internal_static_hbase_pb_MajorCompactionTimestampResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_MajorCompactionTimestampResponse_descriptor, new java.lang.String[] { "CompactionTimestamp", }); internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor = - getDescriptor().getMessageTypes().get(109); + getDescriptor().getMessageTypes().get(107); internal_static_hbase_pb_SecurityCapabilitiesRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesRequest_descriptor, new java.lang.String[] { }); internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor = - getDescriptor().getMessageTypes().get(110); + getDescriptor().getMessageTypes().get(108); internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java index a45c42194cf..f64d0c1b412 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java @@ -9733,540 +9733,6 @@ public final class ZooKeeperProtos { // @@protoc_insertion_point(class_scope:hbase.pb.SwitchState) } - public interface SplitAndMergeStateOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // optional bool split_enabled = 1; - /** - * optional bool split_enabled = 1; - */ - boolean hasSplitEnabled(); - /** - * optional bool split_enabled = 1; - */ - boolean getSplitEnabled(); - - // optional bool merge_enabled = 2; - /** - * optional bool merge_enabled = 2; - */ - boolean hasMergeEnabled(); - /** - * optional bool merge_enabled = 2; - */ - boolean getMergeEnabled(); - } - /** - * Protobuf type {@code hbase.pb.SplitAndMergeState} - * - *
-   * <pre>
-   **
-   * State for split and merge, used in hbck
-   * </pre>
- */ - public static final class SplitAndMergeState extends - com.google.protobuf.GeneratedMessage - implements SplitAndMergeStateOrBuilder { - // Use SplitAndMergeState.newBuilder() to construct. - private SplitAndMergeState(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private SplitAndMergeState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final SplitAndMergeState defaultInstance; - public static SplitAndMergeState getDefaultInstance() { - return defaultInstance; - } - - public SplitAndMergeState getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private SplitAndMergeState( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; - } - case 8: { - bitField0_ |= 0x00000001; - splitEnabled_ = input.readBool(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - mergeEnabled_ = input.readBool(); - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public SplitAndMergeState parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SplitAndMergeState(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - // optional bool split_enabled = 1; - public static final int SPLIT_ENABLED_FIELD_NUMBER = 1; - private boolean splitEnabled_; - /** - * optional bool split_enabled = 1; - */ - public boolean hasSplitEnabled() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional bool 
split_enabled = 1; - */ - public boolean getSplitEnabled() { - return splitEnabled_; - } - - // optional bool merge_enabled = 2; - public static final int MERGE_ENABLED_FIELD_NUMBER = 2; - private boolean mergeEnabled_; - /** - * optional bool merge_enabled = 2; - */ - public boolean hasMergeEnabled() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional bool merge_enabled = 2; - */ - public boolean getMergeEnabled() { - return mergeEnabled_; - } - - private void initFields() { - splitEnabled_ = false; - mergeEnabled_ = false; - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeBool(1, splitEnabled_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeBool(2, mergeEnabled_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(1, splitEnabled_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeBoolSize(2, mergeEnabled_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState)) { - return super.equals(obj); - } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState) obj; - - boolean result = true; - result = result && (hasSplitEnabled() == other.hasSplitEnabled()); - if (hasSplitEnabled()) { - result = result && (getSplitEnabled() - == other.getSplitEnabled()); - } - result = result && (hasMergeEnabled() == other.hasMergeEnabled()); - if (hasMergeEnabled()) { - result = result && (getMergeEnabled() - == other.getMergeEnabled()); - } - result = result && - getUnknownFields().equals(other.getUnknownFields()); - return result; - } - - private int memoizedHashCode = 0; - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptorForType().hashCode(); - if (hasSplitEnabled()) { - hash = (37 * hash) + SPLIT_ENABLED_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getSplitEnabled()); - } - if (hasMergeEnabled()) { - hash = (37 * hash) + MERGE_ENABLED_FIELD_NUMBER; - hash = (53 * hash) + hashBoolean(getMergeEnabled()); - } - hash = (29 * hash) + getUnknownFields().hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( - com.google.protobuf.ByteString data) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code hbase.pb.SplitAndMergeState} - * - *
-     * <pre>
-     **
-     * State for split and merge, used in hbck
-     * </pre>
- */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeStateOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable - .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.Builder.class); - } - - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - splitEnabled_ = false; - bitField0_ = (bitField0_ & ~0x00000001); - mergeEnabled_ = false; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.getDefaultInstance(); - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - result.splitEnabled_ = splitEnabled_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.mergeEnabled_ = mergeEnabled_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState other) { - if (other 
== org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.getDefaultInstance()) return this; - if (other.hasSplitEnabled()) { - setSplitEnabled(other.getSplitEnabled()); - } - if (other.hasMergeEnabled()) { - setMergeEnabled(other.getMergeEnabled()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - // optional bool split_enabled = 1; - private boolean splitEnabled_ ; - /** - * optional bool split_enabled = 1; - */ - public boolean hasSplitEnabled() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * optional bool split_enabled = 1; - */ - public boolean getSplitEnabled() { - return splitEnabled_; - } - /** - * optional bool split_enabled = 1; - */ - public Builder setSplitEnabled(boolean value) { - bitField0_ |= 0x00000001; - splitEnabled_ = value; - onChanged(); - return this; - } - /** - * optional bool split_enabled = 1; - */ - public Builder clearSplitEnabled() { - bitField0_ = (bitField0_ & ~0x00000001); - splitEnabled_ = false; - onChanged(); - return this; - } - - // optional bool merge_enabled = 2; - private boolean mergeEnabled_ ; - /** - * optional bool merge_enabled = 2; - */ - public boolean hasMergeEnabled() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional bool merge_enabled = 2; - */ - public boolean getMergeEnabled() { - return mergeEnabled_; - } - /** - * optional bool merge_enabled = 2; - */ - public Builder setMergeEnabled(boolean value) { - bitField0_ |= 0x00000002; - mergeEnabled_ = value; - onChanged(); - return this; - } - /** - * optional bool merge_enabled = 2; - */ - public Builder clearMergeEnabled() { - bitField0_ = (bitField0_ & ~0x00000002); - mergeEnabled_ = false; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:hbase.pb.SplitAndMergeState) - } - - static { - defaultInstance = new SplitAndMergeState(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:hbase.pb.SplitAndMergeState) - } - private static com.google.protobuf.Descriptors.Descriptor internal_static_hbase_pb_MetaRegionServer_descriptor; private static @@ -10327,11 +9793,6 @@ public final class ZooKeeperProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_hbase_pb_SwitchState_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_hbase_pb_SplitAndMergeState_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -10376,10 +9837,9 @@ public final class ZooKeeperProtos { "ner\030\002 \001(\0132\024.hbase.pb.ServerName\022\021\n\tthrea" + "d_id\030\003 
\001(\003\022\021\n\tis_shared\030\004 \001(\010\022\017\n\007purpose" + "\030\005 \001(\t\022\023\n\013create_time\030\006 \001(\003\"\036\n\013SwitchSta" + - "te\022\017\n\007enabled\030\001 \001(\010\"B\n\022SplitAndMergeStat" + - "e\022\025\n\rsplit_enabled\030\001 \001(\010\022\025\n\rmerge_enable" + - "d\030\002 \001(\010BE\n*org.apache.hadoop.hbase.proto" + - "buf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001" + "te\022\017\n\007enabled\030\001 \001(\010BE\n*org.apache.hadoop" + + ".hbase.protobuf.generatedB\017ZooKeeperProt" + + "osH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -10458,12 +9918,6 @@ public final class ZooKeeperProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_hbase_pb_SwitchState_descriptor, new java.lang.String[] { "Enabled", }); - internal_static_hbase_pb_SplitAndMergeState_descriptor = - getDescriptor().getMessageTypes().get(12); - internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_hbase_pb_SplitAndMergeState_descriptor, - new java.lang.String[] { "SplitEnabled", "MergeEnabled", }); return null; } }; diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto index 23bbbf8742d..2bf36b48d2c 100644 --- a/hbase-protocol/src/main/protobuf/Master.proto +++ b/hbase-protocol/src/main/protobuf/Master.proto @@ -291,7 +291,6 @@ message SetSplitOrMergeEnabledRequest { required bool enabled = 1; optional bool synchronous = 2; repeated MasterSwitchType switch_types = 3; - optional bool skip_lock = 4; } message SetSplitOrMergeEnabledResponse { @@ -306,12 +305,6 @@ message IsSplitOrMergeEnabledResponse { required bool enabled = 1; } -message ReleaseSplitOrMergeLockAndRollbackRequest { -} - -message ReleaseSplitOrMergeLockAndRollbackResponse { -} - message NormalizeRequest { } @@ -681,12 +674,6 @@ service MasterService { rpc IsSplitOrMergeEnabled(IsSplitOrMergeEnabledRequest) returns(IsSplitOrMergeEnabledResponse); - /** - * Release lock and rollback state. - */ - rpc ReleaseSplitOrMergeLockAndRollback(ReleaseSplitOrMergeLockAndRollbackRequest) - returns(ReleaseSplitOrMergeLockAndRollbackResponse); - /** * Run region normalizer. Can NOT run for various reasons. Check logs. 
*/ diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto index 41c0e0eebf9..8713cbdf9e4 100644 --- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto +++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto @@ -166,11 +166,3 @@ message TableLock { message SwitchState { optional bool enabled = 1; } - -/** - * State for split and merge, used in hbck - */ -message SplitAndMergeState { - optional bool split_enabled = 1; - optional bool merge_enabled = 2; -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index 986ff6e1acc..90af7c7935d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.errorhandling.ForeignException; -import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; import org.apache.hadoop.hbase.ipc.PriorityFunction; import org.apache.hadoop.hbase.ipc.QosPriority; @@ -1474,10 +1473,6 @@ public class MasterRpcServices extends RSRpcServices try { master.checkInitialized(); boolean newValue = request.getEnabled(); - boolean skipLock = request.getSkipLock(); - if (!master.getSplitOrMergeTracker().lock(skipLock)) { - throw new DoNotRetryIOException("can't set splitOrMerge switch due to lock"); - } for (MasterProtos.MasterSwitchType masterSwitchType : request.getSwitchTypesList()) { MasterSwitchType switchType = convert(masterSwitchType); boolean oldValue = master.isSplitOrMergeEnabled(switchType); @@ -1509,24 +1504,6 @@ public class MasterRpcServices extends RSRpcServices return response.build(); } - @Override - public ReleaseSplitOrMergeLockAndRollbackResponse - releaseSplitOrMergeLockAndRollback(RpcController controller, - ReleaseSplitOrMergeLockAndRollbackRequest request) throws ServiceException { - try { - master.getSplitOrMergeTracker().releaseLockAndRollback(); - } catch (KeeperException e) { - throw new ServiceException(e); - } catch (DeserializationException e) { - throw new ServiceException(e); - } catch (InterruptedException e) { - throw new ServiceException(e); - } - ReleaseSplitOrMergeLockAndRollbackResponse.Builder builder = - ReleaseSplitOrMergeLockAndRollbackResponse.newBuilder(); - return builder.build(); - } - @Override public NormalizeResponse normalize(RpcController controller, NormalizeRequest request) throws ServiceException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 82f39e357b3..c05973b1d4f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.util; -import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; @@ -311,6 +310,7 @@ public class HBaseFsck extends Configured implements Closeable { private Map> skippedRegions = new HashMap>(); ZooKeeperWatcher zkw = 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 82f39e357b3..c05973b1d4f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase.util;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
@@ -311,6 +310,7 @@ public class HBaseFsck extends Configured implements Closeable {
   private Map<TableName, Set<String>> skippedRegions = new HashMap<>();
 
   ZooKeeperWatcher zkw = null;
+
   /**
    * Constructor
    *
@@ -691,8 +691,7 @@ public class HBaseFsck extends Configured implements Closeable {
     }
     boolean[] oldSplitAndMerge = null;
     if (shouldDisableSplitAndMerge()) {
-      admin.releaseSplitOrMergeLockAndRollback();
-      oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false, false,
+      oldSplitAndMerge = admin.setSplitOrMergeEnabled(false, false,
         MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
     }
 
@@ -709,7 +708,14 @@ public class HBaseFsck extends Configured implements Closeable {
 
       if (shouldDisableSplitAndMerge()) {
         if (oldSplitAndMerge != null) {
-          admin.releaseSplitOrMergeLockAndRollback();
+          if (oldSplitAndMerge[0] && oldSplitAndMerge[1]) {
+            admin.setSplitOrMergeEnabled(true, false,
+              MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
+          } else if (oldSplitAndMerge[0]) {
+            admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.SPLIT);
+          } else if (oldSplitAndMerge[1]) {
+            admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.MERGE);
+          }
         }
       }
     }
@@ -4226,12 +4232,7 @@ public class HBaseFsck extends Configured implements Closeable {
    * Disable the split and merge
    */
   public static void setDisableSplitAndMerge() {
-    setDisableSplitAndMerge(true);
-  }
-
-  @VisibleForTesting
-  public static void setDisableSplitAndMerge(boolean flag) {
-    disableSplitAndMerge = flag;
+    disableSplitAndMerge = true;
   }
 
   /**
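
The hbck change above replaces the server-held lock with a purely client-side save-and-restore protocol: the boolean[] returned by setSplitOrMergeEnabled carries the previous switch values, and every switch that was previously on is turned back on afterwards. A minimal sketch of that same idiom in loop form follows; it is illustrative only, the helper name and the try/finally framing are additions here, not code from this patch.

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.MasterSwitchType;

    public class RestoreSwitchesSketch {
      // Disable splits and merges around a task, then restore the prior state.
      static void withSwitchesDisabled(Admin admin, Runnable task) throws IOException {
        MasterSwitchType[] types = { MasterSwitchType.SPLIT, MasterSwitchType.MERGE };
        boolean[] previous = admin.setSplitOrMergeEnabled(false, false, types);
        try {
          task.run();
        } finally {
          // Re-enable exactly the switches that were on before.
          for (int i = 0; i < types.length; i++) {
            if (previous[i]) {
              admin.setSplitOrMergeEnabled(true, false, types[i]);
            }
          }
        }
      }
    }
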
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
index b975c4330ad..2dad9878a6e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/SplitOrMergeTracker.java
@@ -18,8 +18,6 @@
 package org.apache.hadoop.hbase.zookeeper;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
@@ -27,7 +25,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.zookeeper.KeeperException;
@@ -40,13 +37,8 @@ import org.apache.zookeeper.KeeperException;
 @InterfaceAudience.Private
 public class SplitOrMergeTracker {
 
-  public static final String LOCK = "splitOrMergeLock";
-  public static final String STATE = "splitOrMergeState";
-
   private String splitZnode;
   private String mergeZnode;
-  private String splitOrMergeLock;
-  private ZooKeeperWatcher watcher;
 
   private SwitchStateTracker splitStateTracker;
   private SwitchStateTracker mergeStateTracker;
@@ -57,9 +49,6 @@ public class SplitOrMergeTracker {
       if (ZKUtil.checkExists(watcher, watcher.getSwitchZNode()) < 0) {
         ZKUtil.createAndFailSilent(watcher, watcher.getSwitchZNode());
       }
-      if (ZKUtil.checkExists(watcher, watcher.getSwitchLockZNode()) < 0) {
-        ZKUtil.createAndFailSilent(watcher, watcher.getSwitchLockZNode());
-      }
     } catch (KeeperException e) {
       throw new RuntimeException(e);
     }
@@ -67,12 +56,8 @@ public class SplitOrMergeTracker {
       conf.get("zookeeper.znode.switch.split", "split"));
     mergeZnode = ZKUtil.joinZNode(watcher.getSwitchZNode(),
       conf.get("zookeeper.znode.switch.merge", "merge"));
-
-    splitOrMergeLock = ZKUtil.joinZNode(watcher.getSwitchLockZNode(), LOCK);
-
     splitStateTracker = new SwitchStateTracker(watcher, splitZnode, abortable);
     mergeStateTracker = new SwitchStateTracker(watcher, mergeZnode, abortable);
-    this.watcher = watcher;
   }
 
   public void start() {
@@ -106,76 +91,6 @@ public class SplitOrMergeTracker {
     }
   }
 
-  /**
-   * rollback the original state and delete lock node.
-   * */
-  public void releaseLockAndRollback()
-      throws KeeperException, DeserializationException, InterruptedException {
-    if (ZKUtil.checkExists(watcher, splitOrMergeLock) != -1) {
-      List<ZKUtil.ZKUtilOp> ops = new ArrayList<>();
-      rollback(ops);
-      ops.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(splitOrMergeLock));
-      ZKUtil.multiOrSequential(watcher, ops, false);
-    }
-  }
-
-  // If there is old states of switch on zk, do rollback
-  private void rollback(List<ZKUtil.ZKUtilOp> ops)
-      throws KeeperException, InterruptedException, DeserializationException {
-    String splitOrMergeState = ZKUtil.joinZNode(watcher.getSwitchLockZNode(),
-      SplitOrMergeTracker.STATE);
-    if (ZKUtil.checkExists(watcher, splitOrMergeState) != -1) {
-      byte[] bytes = ZKUtil.getData(watcher, splitOrMergeState);
-      ProtobufUtil.expectPBMagicPrefix(bytes);
-      ZooKeeperProtos.SplitAndMergeState.Builder builder =
-        ZooKeeperProtos.SplitAndMergeState.newBuilder();
-      try {
-        int magicLen = ProtobufUtil.lengthOfPBMagic();
-        ProtobufUtil.mergeFrom(builder, bytes, magicLen, bytes.length - magicLen);
-      } catch (IOException e) {
-        throw new DeserializationException(e);
-      }
-      ZooKeeperProtos.SplitAndMergeState splitAndMergeState = builder.build();
-      splitStateTracker.setSwitchEnabled(splitAndMergeState.hasSplitEnabled());
-      mergeStateTracker.setSwitchEnabled(splitAndMergeState.hasMergeEnabled());
-      ops.add(ZKUtil.ZKUtilOp.deleteNodeFailSilent(splitOrMergeState));
-    }
-  }
-
-  /**
-   * If there is no lock, you could acquire the lock.
-   * After we create lock on zk, we save original splitOrMerge switches on zk.
-   * @param skipLock if true, it means we will skip the lock action
-   *                 but we still need to check whether the lock exists or not.
-   * @return true, lock successfully.
-   *         otherwise, false
-   * */
-  public boolean lock(boolean skipLock) throws KeeperException {
-    if (ZKUtil.checkExists(watcher, splitOrMergeLock) != -1) {
-      return false;
-    }
-    if (skipLock) {
-      return true;
-    }
-    ZKUtil.createAndFailSilent(watcher, splitOrMergeLock);
-    if (ZKUtil.checkExists(watcher, splitOrMergeLock) != -1) {
-      saveOriginalState();
-      return true;
-    }
-    return false;
-  }
-
-  private void saveOriginalState() throws KeeperException {
-    boolean splitEnabled = isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
-    boolean mergeEnabled = isSplitOrMergeEnabled(MasterSwitchType.MERGE);
-    String splitOrMergeStates = ZKUtil.joinZNode(watcher.getSwitchLockZNode(),
-      SplitOrMergeTracker.STATE);
-    ZooKeeperProtos.SplitAndMergeState.Builder builder
-      = ZooKeeperProtos.SplitAndMergeState.newBuilder();
-    builder.setSplitEnabled(splitEnabled);
-    builder.setMergeEnabled(mergeEnabled);
-    ZKUtil.createSetData(watcher, splitOrMergeStates,
-      ProtobufUtil.prependPBMagic(builder.build().toByteArray()));
-  }
-
   private static class SwitchStateTracker extends ZooKeeperNodeTracker {
 
     public SwitchStateTracker(ZooKeeperWatcher watcher, String node, Abortable abortable) {
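
What survives the removals is the per-switch znode pair tracked by SwitchStateTracker. A small sketch of the serialization convention that code relies on follows; the framing is an assumption inferred from the deleted saveOriginalState above, which used the same prependPBMagic prefix, and the sketch is not code from this patch. Each znode holds a SwitchState message behind the standard PB magic.

    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SwitchState;

    public class SwitchStateEncodingSketch {
      // Encode a switch value the way the tracker stores it in ZooKeeper.
      public static byte[] encode(boolean enabled) {
        SwitchState state = SwitchState.newBuilder().setEnabled(enabled).build();
        return ProtobufUtil.prependPBMagic(state.toByteArray());
      }

      public static void main(String[] args) {
        // 4-byte "PBUF" magic plus a 2-byte message for enabled=true.
        System.out.println(encode(true).length);
      }
    }
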
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java
index e10cde5f9db..f6b63615f69 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSplitOrMergeStatus.java
@@ -41,7 +41,6 @@ import java.util.List;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 @Category({MediumTests.class, ClientTests.class})
 public class TestSplitOrMergeStatus {
@@ -77,15 +76,13 @@ public class TestSplitOrMergeStatus {
     Admin admin = TEST_UTIL.getAdmin();
     initSwitchStatus(admin);
-    boolean[] results = admin.setSplitOrMergeEnabled(false, false,
-      true, MasterSwitchType.SPLIT);
+    boolean[] results = admin.setSplitOrMergeEnabled(false, false, MasterSwitchType.SPLIT);
     assertEquals(results.length, 1);
     assertTrue(results[0]);
     admin.split(t.getName());
     int count = waitOnSplitOrMerge(t).size();
     assertTrue(orignalCount == count);
-
-    results = admin.setSplitOrMergeEnabled(true, false, true, MasterSwitchType.SPLIT);
+    results = admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.SPLIT);
     assertEquals(results.length, 1);
     assertFalse(results[0]);
     admin.split(t.getName());
@@ -110,8 +107,7 @@ public class TestSplitOrMergeStatus {
     waitForMergable(admin, name);
     int orignalCount = locator.getAllRegionLocations().size();
-    boolean[] results = admin.setSplitOrMergeEnabled(false, false,
-      true, MasterSwitchType.MERGE);
+    boolean[] results = admin.setSplitOrMergeEnabled(false, false, MasterSwitchType.MERGE);
     assertEquals(results.length, 1);
     assertTrue(results[0]);
     List<HRegionInfo> regions = admin.getTableRegions(t.getName());
@@ -122,7 +118,7 @@ public class TestSplitOrMergeStatus {
     assertTrue(orignalCount == count);
     waitForMergable(admin, name);
-    results = admin.setSplitOrMergeEnabled(true, false, true, MasterSwitchType.MERGE);
+    results = admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.MERGE);
     assertEquals(results.length, 1);
     assertFalse(results[0]);
     admin.mergeRegionsAsync(regions.get(0).getEncodedNameAsBytes(),
@@ -135,7 +131,7 @@ public class TestSplitOrMergeStatus {
   @Test
   public void testMultiSwitches() throws IOException {
     Admin admin = TEST_UTIL.getAdmin();
-    boolean[] switches = admin.setSplitOrMergeEnabled(false, false, true,
+    boolean[] switches = admin.setSplitOrMergeEnabled(false, false,
       MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
     for (boolean s : switches){
       assertTrue(s);
@@ -145,34 +141,12 @@ public class TestSplitOrMergeStatus {
     admin.close();
   }
 
-  @Test
-  public void testSwitchLock() throws IOException {
-    Admin admin = TEST_UTIL.getAdmin();
-    admin.setSplitOrMergeEnabled(false, false, false,
-      MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
-    try {
-      admin.setSplitOrMergeEnabled(false, false, true,
-        MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
-      fail();
-    } catch (IOException e) {
-      LOG.info("", e);
-    }
-    admin.releaseSplitOrMergeLockAndRollback();
-    try {
-      admin.setSplitOrMergeEnabled(true, false, true,
-        MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
-    } catch (IOException e) {
-      fail();
-    }
-    admin.close();
-  }
-
   private void initSwitchStatus(Admin admin) throws IOException {
     if (!admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT)) {
-      admin.setSplitOrMergeEnabled(true, false, true, MasterSwitchType.SPLIT);
+      admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.SPLIT);
     }
     if (!admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE)) {
-      admin.setSplitOrMergeEnabled(true, false, true, MasterSwitchType.MERGE);
+      admin.setSplitOrMergeEnabled(true, false, MasterSwitchType.MERGE);
     }
     assertTrue(admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT));
     assertTrue(admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
index ecfe521e7f0..7f0f6dbcfa4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
@@ -63,7 +63,6 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
 import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -72,8 +71,6 @@ import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -95,8 +92,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.*;
 import static org.junit.Assert.*;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.spy;
 
 @Category({MiscTests.class, LargeTests.class})
 public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
@@ -1851,53 +1846,4 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     doQuarantineTest(table, hbck, 3, 0, 0, 0, 1);
     hbck.close();
   }
-
-  /**
-   * See HBASE-15406
-   * */
-  @Test
-  public void testSplitOrMergeStatWhenHBCKAbort() throws Exception {
-    admin.setSplitOrMergeEnabled(true, false, true,
-      MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
-    boolean oldSplit = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
-    boolean oldMerge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE);
-
-    assertTrue(oldSplit);
-    assertTrue(oldMerge);
-
-    ExecutorService exec = new ScheduledThreadPoolExecutor(10);
-    HBaseFsck hbck = new HBaseFsck(conf, exec);
-    HBaseFsck.setDisplayFullReport(); // i.e. -details
-    final HBaseFsck spiedHbck = spy(hbck);
-    doAnswer(new Answer() {
-      @Override
-      public Object answer(InvocationOnMock invocation) throws Throwable {
-        // we close splitOrMerge flag in hbck, so in finally hbck will not set splitOrMerge back.
-        spiedHbck.setDisableSplitAndMerge(false);
-        return null;
-      }
-    }).when(spiedHbck).onlineConsistencyRepair();
-    spiedHbck.setDisableSplitAndMerge();
-    spiedHbck.connect();
-    spiedHbck.onlineHbck();
-    spiedHbck.close();
-
-    boolean split = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
-    boolean merge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE);
-    assertFalse(split);
-    assertFalse(merge);
-
-    // rerun hbck to repair the switches state
-    hbck = new HBaseFsck(conf, exec);
-    hbck.setDisableSplitAndMerge();
-    hbck.connect();
-    hbck.onlineHbck();
-    hbck.close();
-
-    split = admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT);
-    merge = admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE);
-
-    assertTrue(split);
-    assertTrue(merge);
-  }
 }
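
Dropping testSwitchLock and testSplitOrMergeStatWhenHBCKAbort leaves the previous-value contract as the behavior clients can rely on. A hypothetical assertion of that contract follows, as a JUnit-style fragment written for this note rather than taken from the patch, with admin as in the tests above: whatever you set, the returned previous values must let you restore the switches exactly.

    // Flip both switches off, then restore from the returned previous values.
    boolean[] prev = admin.setSplitOrMergeEnabled(false, false,
      MasterSwitchType.SPLIT, MasterSwitchType.MERGE);
    admin.setSplitOrMergeEnabled(prev[0], false, MasterSwitchType.SPLIT);
    admin.setSplitOrMergeEnabled(prev[1], false, MasterSwitchType.MERGE);
    assertEquals(prev[0], admin.isSplitOrMergeEnabled(MasterSwitchType.SPLIT));
    assertEquals(prev[1], admin.isSplitOrMergeEnabled(MasterSwitchType.MERGE));
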
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index c6e6f484fd9..e4f52e9d4b3 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -151,7 +151,7 @@ module Hbase
       end
       @admin.setSplitOrMergeEnabled(
         java.lang.Boolean.valueOf(enabled), java.lang.Boolean.valueOf(false),
-        java.lang.Boolean.valueOf(true), switch_type)[0]
+        switch_type)[0]
     end
 
     #----------------------------------------------------------------------------------------------